import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq parameter names to their transformers counterparts; "*" stands
# for the encoder layer index and is filled in while loading.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk down the attribute path (e.g. "encoder.layers.0.attention.k_proj").
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak a fairseq Hubert checkpoint's weights into the transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
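# Example invocation, assuming the script is saved under its usual name; all
# paths below are illustrative placeholders, not files shipped with the script:
#
#   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/hubert_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./hubert-converted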
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    # Query PyPI for every published diffusers release, sorted oldest to newest.
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    # Download and cache `module_file` from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    # Resolve/cache the module file, then import the requested class out of it.
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
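# Example usage (the pipeline name is illustrative): fetching a community
# pipeline file from the diffusers GitHub repo, caching it as a dynamic module,
# and importing the single DiffusionPipeline subclass it defines:
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "clip_guided_stable_diffusion",              # no "/" => community pipeline on GitHub
#       module_file="clip_guided_stable_diffusion.py",
#   )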
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        # Never wait on the interactive remote-code prompt during tests.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)
    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)
    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)
    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")
    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")
    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)
    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])
    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")
    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)
    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
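# Minimal sketch of the registration API exercised by the tests above, outside
# a test harness. `MyConfig` and `MyTokenizer` are hypothetical user-defined
# classes, not transformers symbols:
#
#   from transformers import AutoConfig, AutoTokenizer
#
#   AutoConfig.register("my-model", MyConfig)
#   AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
#   tokenizer = AutoTokenizer.from_pretrained("/path/to/saved/my-model")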
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa


set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"


@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )
    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a new process
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,  # to allow deterministic fixed memory usage
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
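# For reference, with n_gpus_to_use=2 the distributed branch above assembles a
# launcher command shaped like the following (port and paths are illustrative,
# derived from the f-string in run_trainer, not copied from a real run):
#
#   python -m torch.distributed.run --nproc_per_node=2 --master_port=29500 \
#       <examples_dir>/pytorch/translation/run_translation.py \
#       --model_name_or_path sshleifer/tiny-mbart --do_train ...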
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """
    Return number + 2 if both `number` and `number + 2` are prime, else -1.

    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """
    Return the maximum sum of any contiguous subsequence of `nums`.

    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    # Kadane's algorithm: `current_sum` is the best sum of a run ending at the
    # current element; `ans` is the best sum seen anywhere so far.
    ans = current_sum = nums[0]
    for num in nums[1:]:
        current_sum = max(current_sum + num, num)
        ans = max(ans, current_sum)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
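# Worked example for the Kadane recurrence above, on the (illustrative) input
# [2, -100, 3]:
#   current_sum: 2 -> max(2 - 100, -100) = -98 -> max(-98 + 3, 3) = 3
#   ans:         2 -> 2                        -> 3
# so max_subsequence_sum([2, -100, 3]) returns 3, the best contiguous-run sum.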
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]:
A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Optional[int] = model_class(config=__lowerCamelCase )
for name, module in model.named_modules():
if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ):
A : Dict = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
A : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : List[Any] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
layers_type = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
config.layer_type = layer_type
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BitModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def UpperCAmelCase ( ):
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase )
A : List[Any] = self.default_image_processor
A : List[Any] = prepare_img()
A : Tuple = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A : Union[str, Any] = model(**__lowerCamelCase )
# verify the logits
A : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : Optional[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@require_torch
class lowerCamelCase_ ( _A ,unittest.TestCase ):
'''simple docstring'''
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
self.model_tester = BitModelTester(self ) | 706 |
from math import sqrt
def solution(limit = 100_0000 ):
    num_cuboids : int = 0
    max_cuboid_size : int = 0
    sum_shortest_sides : int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                # count the (a, b) pairs with a + b == sum_shortest_sides and a <= b <= max_cuboid_size
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
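# Illustrative check (added; the value comes from Project Euler 86's published
# example, not from this file): the least M for which the cuboid count first
# exceeds two thousand is 100, so solution(2000) is expected to return 100.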
if __name__ == "__main__":
print(F"""{solution() = }""") | 17 | 0 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = (DDPMParallelScheduler,)
def SCREAMING_SNAKE_CASE__ ( self : List[str] , **__lowerCamelCase : List[Any] ) -> Optional[Any]:
config = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**__lowerCamelCase )
return config
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str:
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=timesteps )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=variance )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]:
self.check_over_configs(thresholding=False )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=t )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int:
A : int = self.scheduler_classes[0]
A : str = self.get_scheduler_config()
A : int = scheduler_class(**__lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1e-5
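# Background note (added for clarity; this is standard DDPM math, Ho et al. 2020,
# rather than code from this file): with variance_type="fixed_small" the variance
# at step t is beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t), which the three
# asserts above probe at t = 0, 487 and 999 of the linear beta schedule.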
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
A : Optional[Any] = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config()
A : Any = scheduler_class(**__lowerCamelCase )
A : Dict = len(__lowerCamelCase )
A : List[Any] = self.dummy_model()
A : Optional[int] = self.dummy_sample_deter
A : List[Any] = self.dummy_sample_deter + 0.1
A : Dict = self.dummy_sample_deter - 0.1
A : Optional[int] = samplea.shape[0]
A : List[str] = torch.stack([samplea, samplea, samplea] , dim=0 )
A : int = torch.arange(__lowerCamelCase )[0:3, None].repeat(1 , __lowerCamelCase )
A : Optional[int] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
A : List[str] = scheduler.batch_step_no_noise(__lowerCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
A : Optional[Any] = torch.sum(torch.abs(__lowerCamelCase ) )
A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.5005 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
A : Optional[Any] = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config()
A : Tuple = scheduler_class(**__lowerCamelCase )
A : Any = len(__lowerCamelCase )
A : Dict = self.dummy_model()
A : Optional[int] = self.dummy_sample_deter
A : Dict = torch.manual_seed(0 )
for t in reversed(range(__lowerCamelCase ) ):
# 1. predict noise residual
A : List[str] = model(__lowerCamelCase , __lowerCamelCase )
# 2. predict previous mean of sample x_t-1
A : Tuple = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
A : Union[str, Any] = pred_prev_sample
A : int = torch.sum(torch.abs(__lowerCamelCase ) )
A : Union[str, Any] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
A : Tuple = self.scheduler_classes[0]
A : Tuple = self.get_scheduler_config(prediction_type="v_prediction" )
A : Optional[int] = scheduler_class(**__lowerCamelCase )
A : Union[str, Any] = len(__lowerCamelCase )
A : str = self.dummy_model()
A : Tuple = self.dummy_sample_deter
A : int = torch.manual_seed(0 )
for t in reversed(range(__lowerCamelCase ) ):
# 1. predict noise residual
A : int = model(__lowerCamelCase , __lowerCamelCase )
# 2. predict previous mean of sample x_t-1
A : List[Any] = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
A : List[str] = pred_prev_sample
A : Optional[Any] = torch.sum(torch.abs(__lowerCamelCase ) )
A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict:
A : Any = self.scheduler_classes[0]
A : Optional[int] = self.get_scheduler_config()
A : Tuple = scheduler_class(**__lowerCamelCase )
A : Dict = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__lowerCamelCase )
A : Optional[Any] = scheduler.timesteps
for i, timestep in enumerate(__lowerCamelCase ):
if i == len(__lowerCamelCase ) - 1:
A : List[Any] = -1
else:
A : Optional[int] = timesteps[i + 1]
A : Optional[Any] = scheduler.previous_timestep(__lowerCamelCase )
A : Optional[int] = prev_t.item()
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
A : Union[str, Any] = self.scheduler_classes[0]
A : Dict = self.get_scheduler_config()
A : str = scheduler_class(**__lowerCamelCase )
A : Dict = [1_00, 87, 50, 51, 0]
with self.assertRaises(__lowerCamelCase , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
A : List[str] = self.scheduler_classes[0]
A : Tuple = self.get_scheduler_config()
A : Tuple = scheduler_class(**__lowerCamelCase )
A : Any = [1_00, 87, 50, 1, 0]
A : Optional[Any] = len(__lowerCamelCase )
with self.assertRaises(__lowerCamelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=__lowerCamelCase , timesteps=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
A : List[Any] = self.scheduler_classes[0]
A : Dict = self.get_scheduler_config()
A : int = scheduler_class(**__lowerCamelCase )
A : List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__lowerCamelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=__lowerCamelCase ) | 707 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = """."""
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = """\n""".join(non_existent_paths)
raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""") | 17 | 0 |
def bubble_sort(list_data , length = 0 ):
    # recursive bubble sort: each pass bubbles the largest remaining element to the end
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
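# Illustrative usage (added example):
#   bubble_sort([0, 5, 2, 3, 2])  ->  [0, 2, 2, 3, 5]
#   bubble_sort([])               ->  []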
if __name__ == "__main__":
import doctest
doctest.testmod() | 708 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str:
A : List[Any] = parent
A : Optional[int] = batch_size
A : Any = image_size
A : Optional[Any] = patch_size
A : Optional[Any] = num_channels
A : Tuple = is_training
A : Optional[Any] = use_labels
A : Union[str, Any] = hidden_size
A : Tuple = num_hidden_layers
A : Union[str, Any] = num_attention_heads
A : Union[str, Any] = intermediate_size
A : Any = hidden_act
A : Tuple = hidden_dropout_prob
A : Dict = attention_probs_dropout_prob
A : Any = type_sequence_label_size
A : Tuple = initializer_range
A : List[Any] = scope
A : Optional[int] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
A : List[str] = (image_size // patch_size) ** 2
A : List[str] = num_patches + 2
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : List[Any] = None
if self.use_labels:
A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Dict = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ) -> int:
A : Optional[int] = DeiTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any ) -> Any:
A : List[Any] = DeiTForMaskedImageModeling(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A : List[str] = 1
A : Optional[int] = DeiTForMaskedImageModeling(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Dict:
A : str = self.type_sequence_label_size
A : List[str] = DeiTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A : Any = 1
A : str = DeiTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]:
self.model_tester = DeiTModelTester(self )
self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
pass
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Dict = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Union[str, Any] = model_class(__lowerCamelCase )
A : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Any = [*signature.parameters.keys()]
A : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ) -> str:
A : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
if not self.model_tester.is_training:
return
A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
A : Union[str, Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Dict = model(**__lowerCamelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A : Tuple = False
A : Any = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
A : List[str] = model_class(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCamelCase )
model.train()
A : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Tuple = model(**__lowerCamelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A : int = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCamelCase ),
*get_values(__lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
A : Tuple = problem_type["title"]
A : Optional[Any] = problem_type["num_labels"]
A : List[str] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if problem_type["num_labels"] > 1:
A : List[str] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
A : int = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list:
A : Optional[Any] = model(**__lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = DeiTModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def UpperCAmelCase ( ):
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
A : Optional[int] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
__lowerCamelCase )
A : List[Any] = self.default_image_processor
A : List[Any] = prepare_img()
A : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A : List[str] = model(**__lowerCamelCase )
# verify the logits
A : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : List[str] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
A : str = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
A : Dict = self.default_image_processor
A : Optional[int] = prepare_img()
A : Union[str, Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" )
A : Union[str, Any] = inputs.pixel_values.to(__lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A : List[str] = model(__lowerCamelCase ) | 17 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = "encoder-decoder"
a__ = True
def __init__( self : Dict , **kwargs : List[Any] ) -> Union[str, Any]:
    super().__init__(**kwargs )
    assert (
        "encoder" in kwargs and "decoder" in kwargs
    ), "Config has to be initialized with encoder and decoder config"
    encoder_config = kwargs.pop("encoder" )
    encoder_model_type = encoder_config.pop("model_type" )
    decoder_config = kwargs.pop("decoder" )
    decoder_model_type = decoder_config.pop("model_type" )
    from ..auto.configuration_auto import AutoConfig
    self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
    self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
    self.is_encoder_decoder = True
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs : List[Any] ) -> PretrainedConfig:
    logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
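# Usage sketch (added illustration; upstream `transformers` exposes this class as
# EncoderDecoderConfig):
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention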
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
output = copy.deepcopy(self.__dict__ )
output["encoder"] = self.encoder.to_dict()
output["decoder"] = self.decoder.to_dict()
output["model_type"] = self.__class__.model_type
return output | 709 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`"warn"`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple="binary" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple="warn" , ) -> Optional[Any]:
A : str = recall_score(
__lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , )
return {"recall": float(__lowerCamelCase ) if score.size == 1 else score} | 17 | 0 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x , y , max_step ):
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence is guaranteed once the absolute value exceeds 2,
        # i.e. once a * a + b * b > 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
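# Illustrative values (added example): a point inside the Mandelbrot set never
# diverges and maps to 1.0, while a far-away point diverges on the first step:
#   get_distance(0, 0, 50)  ->  1.0
#   get_distance(5, 0, 50)  ->  0.0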
def get_black_and_white_rgb(distance ):
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance ):
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image(image_width = 800 , image_height = 600 , figure_center_x = -0.6 , figure_center_y = 0 , figure_width = 3.2 , max_step = 50 , use_distance_color_coding = True , ):
    img = Image.new("RGB" , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show() | 710 |
from collections import deque
from .hash_table import HashTable
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *args : Dict , **kwargs : int ) -> Optional[int]:
    super().__init__(*args , **kwargs )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , key : List[str] , data : Union[str, Any] ) -> Union[str, Any]:
    self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
    self.values[key].appendleft(data )
    self._keys[key] = self.values[key]
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
return (
sum(self.charge_factor - len(slot ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , key : Any , data : List[Any]=None ) -> Optional[int]:
    if not (
        len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
    ):
        return key
    return super()._collision_resolution(key , data ) | 17 | 0 |
def prime_sieve_eratosthenes(num ):
    # sieve of Eratosthenes: returns the primes up to and including num
    if num <= 0:
        raise ValueError("Input must be a positive integer" )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
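# Illustrative usage (added example):
#   prime_sieve_eratosthenes(10)  ->  [2, 3, 5, 7]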
if __name__ == "__main__":
import doctest
doctest.testmod()
user_num = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num)) | 711 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCamelCase_ :
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
return self.get_dummy_input()
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict , include_temb : Union[str, Any]=True , include_res_hidden_states_tuple : int=False , include_encoder_hidden_states : int=False , include_skip_sample : Optional[int]=False , ) -> Dict:
    batch_size = 4
    num_channels = 32
    sizes = (32, 32)
    generator = torch.manual_seed(0 )
    device = torch.device(torch_device )
    shape = (batch_size, num_channels) + sizes
    hidden_states = randn_tensor(shape , generator=generator , device=device )
    dummy_input = {"hidden_states": hidden_states}
    if include_temb:
        temb_channels = 1_28
        dummy_input["temb"] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )
    if include_res_hidden_states_tuple:
        generator_a = torch.manual_seed(1 )
        dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape , generator=generator_a , device=device ),)
    if include_encoder_hidden_states:
        dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32) ).to(torch_device )
    if include_skip_sample:
        dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )
    return dummy_input
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
    init_dict = {
        "in_channels": 32,
        "out_channels": 32,
        "temb_channels": 1_28,
    }
    if self.block_type == "up":
        init_dict["prev_output_channel"] = 32
    if self.block_type == "mid":
        init_dict.pop("out_channels" )
    inputs_dict = self.dummy_input
    return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : str , expected_slice : Optional[int] ) -> Union[str, Any]:
    init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
    unet_block = self.block_class(**init_dict )
    unet_block.to(torch_device )
    unet_block.eval()
    with torch.no_grad():
        output = unet_block(**inputs_dict )
    if isinstance(output , Tuple ):
        output = output[0]
    self.assertEqual(output.shape , self.output_shape )
    output_slice = output[0, -1, -3:, -3:]
    expected_slice = torch.tensor(expected_slice ).to(torch_device )
    assert torch_all_close(output_slice.flatten() , expected_slice , atol=5e-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.block_class(**init_dict )
model.to(torch_device )
model.train()
output = model(**inputs_dict )
if isinstance(output , Tuple ):
    output = output[0]
device = torch.device(torch_device )
noise = randn_tensor(output.shape , device=device )
loss = torch.nn.functional.mse_loss(output , noise )
loss.backward() | 17 | 0 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__SCREAMING_SNAKE_CASE = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = "mask2former"
a__ = ["swin"]
a__ = {"hidden_size": "hidden_dim"}
def __init__( self : List[str] , backbone_config : Optional[Dict] = None , feature_size : int = 2_56 , mask_feature_size : int = 2_56 , hidden_dim : int = 2_56 , encoder_feedforward_dim : int = 10_24 , activation_function : str = "relu" , encoder_layers : int = 6 , decoder_layers : int = 10 , num_attention_heads : int = 8 , dropout : float = 0.0 , dim_feedforward : int = 20_48 , pre_norm : bool = False , enforce_input_projection : bool = False , common_stride : int = 4 , ignore_value : int = 2_55 , num_queries : int = 1_00 , no_object_weight : float = 0.1 , class_weight : float = 2.0 , mask_weight : float = 5.0 , dice_weight : float = 5.0 , train_num_points : int = 1_25_44 , oversample_ratio : float = 3.0 , importance_sample_ratio : float = 0.75 , init_std : float = 0.02 , init_xavier_std : float = 1.0 , use_auxiliary_loss : bool = True , feature_strides : List[int] = [4, 8, 16, 32] , output_auxiliary_logits : bool = None , **kwargs : int , ) -> List[str]:
    if backbone_config is None:
        logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
        backbone_config = CONFIG_MAPPING["swin"](
            image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=["stage1", "stage2", "stage3", "stage4"] , )
    if isinstance(backbone_config , dict ):
        backbone_model_type = backbone_config.pop("model_type" )
        config_class = CONFIG_MAPPING[backbone_model_type]
        backbone_config = config_class.from_dict(backbone_config )
    # verify that the backbone is supported
    if backbone_config.model_type not in self.backbones_supported:
        logger.warning_once(
            F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
            F"""Supported model types: {",".join(self.backbones_supported )}""" )
    self.backbone_config = backbone_config
    self.feature_size = feature_size
    self.mask_feature_size = mask_feature_size
    self.hidden_dim = hidden_dim
    self.encoder_feedforward_dim = encoder_feedforward_dim
    self.activation_function = activation_function
    self.encoder_layers = encoder_layers
    self.decoder_layers = decoder_layers
    self.num_attention_heads = num_attention_heads
    self.dropout = dropout
    self.dim_feedforward = dim_feedforward
    self.pre_norm = pre_norm
    self.enforce_input_projection = enforce_input_projection
    self.common_stride = common_stride
    self.ignore_value = ignore_value
    self.num_queries = num_queries
    self.no_object_weight = no_object_weight
    self.class_weight = class_weight
    self.mask_weight = mask_weight
    self.dice_weight = dice_weight
    self.train_num_points = train_num_points
    self.oversample_ratio = oversample_ratio
    self.importance_sample_ratio = importance_sample_ratio
    self.init_std = init_std
    self.init_xavier_std = init_xavier_std
    self.use_auxiliary_loss = use_auxiliary_loss
    self.feature_strides = feature_strides
    self.output_auxiliary_logits = output_auxiliary_logits
    self.num_hidden_layers = decoder_layers
    super().__init__(**kwargs )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , backbone_config : PretrainedConfig , **kwargs : Any ) -> Optional[int]:
    return cls(
        backbone_config=backbone_config , **kwargs , )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict[str, any]:
output = copy.deepcopy(self.__dict__ )
output["backbone_config"] = self.backbone_config.to_dict()
output["model_type"] = self.__class__.model_type
return output
| 712 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class lowerCamelCase_ ( _A ):
'''simple docstring'''
@add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
def __call__( self : Optional[int] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Optional[Any] ) -> bool:
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : List[str] , max_length : int , max_position_embeddings : Optional[int] = None ) -> List[Any]:
    self.max_length = max_length
    self.max_position_embeddings = max_position_embeddings
@add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
def __call__( self : List[str] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Any ) -> bool:
cur_len = input_ids.shape[-1]
is_done = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
"exceptions, performance degradation, or nothing at all." )
return is_done
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[Any] , start_length : int , max_new_tokens : int ) -> List[Any]:
    warnings.warn(
        "The class `MaxNewTokensCriteria` is deprecated. "
        F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
        "with `max_length = start_length + max_new_tokens` instead." , FutureWarning , )
    self.start_length = start_length
    self.max_new_tokens = max_new_tokens
    self.max_length = start_length + max_new_tokens
@add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
def __call__( self : int , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool:
return input_ids.shape[-1] >= self.max_length
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[int] , max_time : float , initial_timestamp : Optional[float] = None ) -> List[Any]:
    self.max_time = max_time
    self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
def __call__( self : Any , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : Tuple ) -> bool:
return time.time() - self.initial_timestamp > self.max_time
class lowerCamelCase_ ( _A ):
'''simple docstring'''
@add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
def __call__( self : Union[str, Any] , __lowerCamelCase : torch.LongTensor , __lowerCamelCase : torch.FloatTensor , **__lowerCamelCase : int ) -> bool:
return any(criteria(__lowerCamelCase , __lowerCamelCase ) for criteria in self )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
for stopping_criterium in self:
if isinstance(stopping_criterium , MaxLengthCriteria ):
    return stopping_criterium.max_length
elif isinstance(stopping_criterium , MaxNewTokensCriteria ):
    return stopping_criterium.max_length
return None
def UpperCAmelCase ( stopping_criteria , max_length ):
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
return new_stopping_criteria | 17 | 0 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
A : List[str] = get_activation("swish" )
self.assertIsInstance(__lowerCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.float32 ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]:
A : Any = get_activation("silu" )
self.assertIsInstance(__lowerCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.float32 ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
A : Optional[Any] = get_activation("mish" )
self.assertIsInstance(__lowerCamelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.float32 ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
A : Any = get_activation("gelu" )
self.assertIsInstance(__lowerCamelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.float32 ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 ) | 713 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function , starting_point , variable = "x" , precision = 10**-10 , multiplicity = 1 , ):
    x = symbols(variable )
    func = lambdify(x , function )
    diff_function = lambdify(x , diff(function , x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError("Could not find root" ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
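# Illustrative usage (added example): the smaller root of x**2 - 5*x + 2 is
# (5 - sqrt(17)) / 2 ~= 0.438447, so:
#   newton_raphson("x**2 - 5*x + 2", 0.4)  ->  ~0.438447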
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""") | 17 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_55 , do_pad=True , ):
    # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
    size = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
    self.parent = parent
    self.batch_size = batch_size
    self.num_channels = num_channels
    self.min_resolution = min_resolution
    self.max_resolution = max_resolution
    self.do_resize = do_resize
    self.size = size
    self.do_normalize = do_normalize
    self.image_mean = image_mean
    self.image_std = image_std
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.do_pad = do_pad
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def get_expected_values(self , image_inputs , batched=False ):
    if not batched:
        image = image_inputs[0]
        if isinstance(image , Image.Image ):
            w , h = image.size
        else:
            h , w = image.shape[1], image.shape[2]
        if w < h:
            expected_height = int(self.size["shortest_edge"] * h / w )
            expected_width = self.size["shortest_edge"]
        elif w > h:
            expected_height = self.size["shortest_edge"]
            expected_width = int(self.size["shortest_edge"] * w / h )
        else:
            expected_height = self.size["shortest_edge"]
            expected_width = self.size["shortest_edge"]
    else:
        expected_values = []
        for image in image_inputs:
            expected_height , expected_width = self.get_expected_values([image] )
            expected_values.append((expected_height, expected_width) )
        expected_height = max(expected_values , key=lambda item : item[0] )[0]
        expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
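# Worked example (added for clarity): with size {"shortest_edge": 18}, an input of
# width 30 x height 40 (w < h) yields expected_height = int(18 * 40 / 30) = 24 and
# expected_width = 18.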
@require_torch
@require_vision
class lowerCamelCase_ ( _A ,unittest.TestCase ):
'''simple docstring'''
a__ = DetaImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]:
self.image_processor_tester = DetaImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple:
A : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
A : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]:
# Initialize image_processing
A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
A : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A : Optional[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Any = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
A : List[str] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
# Initialize image_processing
A : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
A : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A : Dict = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : List[Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
A : int = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
# Initialize image_processing
A : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
A : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A : List[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Optional[int] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
A : Union[str, Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]:
# prepare image and target
A : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
A : Optional[int] = json.loads(f.read() )
A : Tuple = {"image_id": 3_97_69, "annotations": target}
# encode them
A : int = DetaImageProcessor()
A : Any = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
A : Optional[Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
A : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
A : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
A : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
A : List[str] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
A : Tuple = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
A : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
A : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
A : str = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
A : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
# prepare image, target and masks_path
A : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
A : Optional[int] = json.loads(f.read() )
A : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
A : List[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
A : List[Any] = DetaImageProcessor(format="coco_panoptic" )
A : Dict = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
A : Dict = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
A : Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
A : List[str] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
A : str = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
A : List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
A : Optional[Any] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
A : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
A : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
A : Tuple = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
A : int = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
A : str = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
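# Hedged companion to get_expected_values above: the shortest-edge resize rule
# it mirrors, written as a standalone helper (name and defaults are
# illustrative, not part of the test file). Returns (height, width).
def shortest_edge_resize(height: int, width: int, shortest_edge: int = 18) -> tuple:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


# e.g. shortest_edge_resize(480, 640) == (18, 24)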
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
__SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
__SCREAMING_SNAKE_CASE = {
"""allenai/led-base-16384""": 16384,
}
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = LEDTokenizer
a__ = ["input_ids", "attention_mask"]
def __init__( self : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str="replace" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=True , **__lowerCamelCase : Union[str, Any] , ) -> Optional[int]:
super().__init__(
__lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , )
A : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space:
A : Any = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) )
A : Any = add_prefix_space
A : Tuple = pre_tok_class(**__lowerCamelCase )
A : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A : List[str] = "post_processor"
A : Union[str, Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
if tokenizer_component_instance:
A : Dict = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A : Union[str, Any] = tuple(state["sep"] )
if "cls" in state:
A : str = tuple(state["cls"] )
A : int = False
if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space:
A : List[Any] = add_prefix_space
A : Dict = True
if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets:
A : Dict = trim_offsets
A : str = True
if changes_to_apply:
A : int = getattr(__lowerCamelCase , state.pop("type" ) )
A : Dict = component_class(**__lowerCamelCase )
setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Any ) -> Dict:
A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value
A : Tuple = value
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding:
A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ) -> BatchEncoding:
A : List[str] = kwargs.get("is_split_into_words" , __lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
A : Optional[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> List[str]:
A : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
A : str = [self.sep_token_id]
A : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict:
A : Dict = super()._pad(
encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
A : List[Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
A : Optional[int] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
A : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase )
if needs_to_be_padded:
A : Any = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
A : Tuple = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
A : Tuple = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
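# Hedged sketch of the padding rule _pad enforces above: the global attention
# mask is padded with -1 ("local attention", not "don't attend") on whichever
# side the tokenizer pads. Helper name and signature are illustrative.
def pad_global_attention_mask(mask: list, target_length: int, padding_side: str = "right") -> list:
    difference = target_length - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy: " + padding_side)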
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = "M-CLIP"
def __init__( self : Dict , __lowerCamelCase : List[str]=10_24 , __lowerCamelCase : Optional[Any]=7_68 , **__lowerCamelCase : List[Any] ) -> Optional[int]:
A : List[str] = transformerDimSize
A : Dict = imageDimSize
super().__init__(**__lowerCamelCase )
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = MCLIPConfig
def __init__( self : Optional[int] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Any , **__lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
super().__init__(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
A : Dict = XLMRobertaModel(__lowerCamelCase )
A : List[str] = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Any ) -> int:
A : Any = self.transformer(input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
A : Any = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(__lowerCamelCase ), embs
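# Hedged standalone sketch of the pooling in the forward pass above: zero out
# padded positions, sum over the sequence axis, and divide by the number of
# real tokens per example.
import torch


def masked_mean_pool(embs: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # embs: (batch, seq_len, dim); attention_mask: (batch, seq_len) of 0/1
    summed = (embs * attention_mask.unsqueeze(2)).sum(dim=1)
    return summed / attention_mask.sum(dim=1)[:, None]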
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=_A )
class lowerCamelCase_ ( _A ):
'''simple docstring'''
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
a__ = field(default="question-answering-extractive" ,metadata={"include_in_asdict_even_if_is_default": True} )
a__ = Features({"question": Value("string" ), "context": Value("string" )} )
a__ = Features(
{
"answers": Sequence(
{
"text": Value("string" ),
"answer_start": Value("int32" ),
} )
} )
a__ = "question"
a__ = "context"
a__ = "answers"
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
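# Hedged usage sketch for the template above (the exact API depends on the
# `datasets` version; column names are illustrative):
# task = QuestionAnsweringExtractive(question_column="question",
#                                    context_column="context",
#                                    answers_column="answers")
# squad = squad.prepare_for_task(task)  # casts/renames columns to the schema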
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__SCREAMING_SNAKE_CASE = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
__SCREAMING_SNAKE_CASE = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (NeurIPS, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
__SCREAMING_SNAKE_CASE = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of references for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
    num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
    kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU.
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : int=None , __lowerCamelCase : Union[str, Any]="auto" , __lowerCamelCase : List[Any]=-1 , __lowerCamelCase : Dict=0.9 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : List[Any]=5_00 , __lowerCamelCase : str="gpt2-large" , __lowerCamelCase : Optional[Any]=-1 , __lowerCamelCase : Optional[int]=10_24 , __lowerCamelCase : Optional[Any]=25 , __lowerCamelCase : Dict=5 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Dict=25 , ) -> List[Any]:
A : List[Any] = compute_mauve(
p_text=__lowerCamelCase , q_text=__lowerCamelCase , p_features=__lowerCamelCase , q_features=__lowerCamelCase , p_tokens=__lowerCamelCase , q_tokens=__lowerCamelCase , num_buckets=__lowerCamelCase , pca_max_data=__lowerCamelCase , kmeans_explained_var=__lowerCamelCase , kmeans_num_redo=__lowerCamelCase , kmeans_max_iter=__lowerCamelCase , featurize_model_name=__lowerCamelCase , device_id=__lowerCamelCase , max_text_length=__lowerCamelCase , divergence_curve_discretization_size=__lowerCamelCase , mauve_scaling_factor=__lowerCamelCase , verbose=__lowerCamelCase , seed=__lowerCamelCase , )
        return out
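# Hedged note on the wrapper above: compute_mauve also accepts precomputed
# features (p_features/q_features) or token ids (p_tokens/q_tokens) in place of
# raw text, which is why all three input pairs are threaded through unchanged.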
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : int , __lowerCamelCase : Any , __lowerCamelCase : Dict=3 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Any=3 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : str=[8, 16, 32, 64] , __lowerCamelCase : Dict=[1, 1, 2, 1] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : Any=1 , ) -> int:
A : Optional[int] = parent
A : List[str] = batch_size
A : Tuple = image_size
A : List[str] = num_channels
A : List[str] = embeddings_size
A : List[str] = hidden_sizes
A : str = depths
A : Optional[Any] = is_training
A : int = use_labels
A : Optional[int] = hidden_act
A : List[Any] = num_labels
A : List[str] = scope
A : str = len(__lowerCamelCase )
A : Optional[int] = out_features
A : str = out_indices
A : Optional[int] = num_groups
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Optional[int] = None
if self.use_labels:
A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
A : Tuple = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]:
A : Any = BitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Tuple:
A : Union[str, Any] = self.num_labels
A : List[str] = BitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : str = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> List[Any]:
A : Dict = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Optional[Any] = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A : Optional[Any] = None
A : Optional[int] = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict:
A : List[str] = self.prepare_config_and_inputs()
A , A , A : Tuple = config_and_inputs
A : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a__ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A : Any = BitModelTester(self )
A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
return
@unittest.skip(reason="Bit does not output attentions" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Dict = model_class(__lowerCamelCase )
A : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Optional[Any] = [*signature.parameters.keys()]
A : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]:
A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Optional[int] = model_class(config=__lowerCamelCase )
for name, module in model.named_modules():
if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ):
A : Dict = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
A : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : List[Any] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : Dict = layer_type
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Optional[Any] = BitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase ( ):
A : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase )
A : List[Any] = self.default_image_processor
A : List[Any] = prepare_img()
A : Tuple = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A : Union[str, Any] = model(**__lowerCamelCase )
# verify the logits
A : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : Optional[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@require_torch
class lowerCamelCase_ ( _A ,unittest.TestCase ):
'''simple docstring'''
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
        A : Union[str, Any] = BitModelTester(self )
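# Note on create_and_check_backbone above: with out_features=None the backbone
# falls back to the last stage only, so the second half of the check expects a
# single feature map with config.hidden_sizes[-1] channels.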
'''simple docstring'''
def UpperCAmelCase ( _lowerCamelCase ):
    if not isinstance(_lowerCamelCase , int ):
        A : Dict = f"""Input value of [number={_lowerCamelCase}] must be an integer"""
        raise TypeError(A )
    if _lowerCamelCase < 1:
        A : Tuple = f"""Input value of [number={_lowerCamelCase}] must be > 0"""
        raise ValueError(A )
    A : int = 1
    for i in range(1 , _lowerCamelCase ):
        A *= 4 * i - 2
        A //= i + 1
    return A
if __name__ == "__main__":
import doctest
    doctest.testmod()
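# Note on the loop above: it applies the Catalan recurrence
# C(i) = C(i - 1) * (4 * i - 2) // (i + 1) with C(0) = 1, so an input of n
# yields the (n - 1)-th Catalan number: 1, 1, 2, 5, 14, 42, ...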
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
A : Union[str, Any] = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
A : Tuple = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
A : Tuple = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
A : List[str] = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A : Tuple = model(__lowerCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int:
A : str = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
A : List[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
A : Optional[Any] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
A : List[Any] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A : Optional[int] = model(__lowerCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __lowerCamelCase )
# compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3 ) )
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy-config.json""")
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple:
A : Optional[Any] = 0
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict:
A : List[str] = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict:
A : Optional[int] = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
A : str = AutoConfig.for_model("roberta" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
A : Optional[int] = os.path.join(__lowerCamelCase , "fake-roberta" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
A : int = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertEqual(type(__lowerCamelCase ) , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
try:
AutoConfig.register("custom" , __lowerCamelCase )
# Wrong model type will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoConfig.register("model" , __lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoConfig.register("bert" , __lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
A : Optional[Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase )
A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
with self.assertRaisesRegex(
__lowerCamelCase , "bert-base is not a local folder and is not a valid model identifier" ):
A : str = AutoConfig.from_pretrained("bert-base" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Any:
with self.assertRaisesRegex(
__lowerCamelCase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
A : Optional[int] = AutoConfig.from_pretrained(__lowerCamelCase , revision="aaaaaa" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
with self.assertRaisesRegex(
__lowerCamelCase , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
A : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowerCamelCase ):
A : int = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCamelCase ):
A : Union[str, Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__lowerCamelCase )
A : Union[str, Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__lowerCamelCase )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase )
A : List[Any] = AutoConfig.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = "new-model"
try:
AutoConfig.register("new-model" , __lowerCamelCase )
# If remote code is not set, the default is to use local
A : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
A : str = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__lowerCamelCase )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
A : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__lowerCamelCase )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
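# Hedged sketch of the register/round-trip flow exercised above; assumes a
# config class whose model_type is "custom", mirroring the test fixture:
# AutoConfig.register("custom", CustomConfig)   # one-time registration
# CustomConfig().save_pretrained(tmp_dir)       # writes config.json
# cfg = AutoConfig.from_pretrained(tmp_dir)     # dispatches on model_type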
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=10 , __lowerCamelCase : Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase : Tuple=[1, 1, 2, 1] , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , ) -> str:
A : Optional[Any] = parent
A : Optional[int] = batch_size
A : List[str] = image_size
A : List[str] = num_channels
A : Tuple = embeddings_size
A : Optional[int] = hidden_sizes
A : Dict = depths
A : Optional[int] = is_training
A : List[str] = use_labels
A : List[Any] = hidden_act
A : Optional[int] = num_labels
A : int = scope
A : List[Any] = len(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]:
A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Optional[Any] = None
if self.use_labels:
A : Any = ids_tensor([self.batch_size] , self.num_labels )
A : List[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple:
A : List[str] = TFRegNetModel(config=__lowerCamelCase )
A : str = model(__lowerCamelCase , training=__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> List[str]:
A : List[Any] = self.num_labels
A : int = TFRegNetForImageClassification(__lowerCamelCase )
A : str = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
A : Any = self.prepare_config_and_inputs()
A , A , A : str = config_and_inputs
A : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
a__ = (
{"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
A : Optional[Any] = TFRegNetModelTester(self )
A : int = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Union[str, Any] = model_class(__lowerCamelCase )
A : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Union[str, Any] = [*signature.parameters.keys()]
A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ):
A : int = model_class(__lowerCamelCase )
A : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase )
A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : Dict = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
A : str = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : List[str] = layer_type
A : List[Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A , A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(__lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]={} ):
A : Optional[int] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase )
A : int = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple()
def recursive_check(__lowerCamelCase : List[str] , __lowerCamelCase : Any ):
if isinstance(__lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase ):
recursive_check(__lowerCamelCase , __lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase )
for model_class in self.all_model_classes:
A : Tuple = model_class(__lowerCamelCase )
A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} )
A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Union[str, Any] = TFRegNetModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase ( ):
A : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
A : List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A : Optional[int] = self.default_image_processor
A : List[Any] = prepare_img()
A : str = image_processor(images=__lowerCamelCase , return_tensors="tf" )
# forward pass
A : List[Any] = model(**__lowerCamelCase , training=__lowerCamelCase )
# verify the logits
A : Dict = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
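# Hedged note on the pattern above: at import time only _import_structure is
# read; _LazyModule installs itself in sys.modules and resolves a submodule on
# first attribute access, so the torch-gated imports run only when a name such
# as TimesformerModel is actually touched.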
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = (PNDMScheduler,)
a__ = (("num_inference_steps", 50),)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **__lowerCamelCase : str ) -> Optional[Any]:
A : Union[str, Any] = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**__lowerCamelCase )
return config
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , __lowerCamelCase : List[str]=0 , **__lowerCamelCase : Any ) -> Tuple:
A : Dict = dict(self.forward_default_kwargs )
A : Dict = kwargs.pop("num_inference_steps" , __lowerCamelCase )
A : Union[str, Any] = self.dummy_sample
A : List[Any] = 0.1 * sample
A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A : Any = self.get_scheduler_config(**__lowerCamelCase )
A : int = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A : Tuple = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A : Dict = scheduler_class.from_pretrained(__lowerCamelCase )
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
A : Tuple = dummy_past_residuals[:]
A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : str = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A : int = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : List[str] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Tuple ) -> str:
A : List[str] = dict(self.forward_default_kwargs )
A : Optional[int] = kwargs.pop("num_inference_steps" , __lowerCamelCase )
A : List[str] = self.dummy_sample
A : Any = 0.1 * sample
A : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A : Tuple = self.get_scheduler_config()
A : Optional[int] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
A : Optional[int] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
A : str = scheduler_class.from_pretrained(__lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
A : Optional[Any] = dummy_past_residuals[:]
A : Union[str, Any] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : Dict = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A : Union[str, Any] = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : List[Any] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : Tuple , **__lowerCamelCase : Any ) -> Union[str, Any]:
A : Optional[Any] = self.scheduler_classes[0]
A : List[Any] = self.get_scheduler_config(**__lowerCamelCase )
A : str = scheduler_class(**__lowerCamelCase )
A : List[str] = 10
A : Union[str, Any] = self.dummy_model()
A : int = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCamelCase )
for i, t in enumerate(scheduler.prk_timesteps ):
A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase )
A : Optional[int] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
A : Tuple = model(__lowerCamelCase , __lowerCamelCase )
A : Tuple = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
return sample
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
A : Union[str, Any] = dict(self.forward_default_kwargs )
A : Union[str, Any] = kwargs.pop("num_inference_steps" , __lowerCamelCase )
for scheduler_class in self.scheduler_classes:
A : Dict = self.get_scheduler_config()
A : List[str] = scheduler_class(**__lowerCamelCase )
A : List[Any] = self.dummy_sample
A : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowerCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__lowerCamelCase )
elif num_inference_steps is not None and not hasattr(__lowerCamelCase , "set_timesteps" ):
A : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A : Tuple = dummy_past_residuals[:]
A : Dict = scheduler.step_prk(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : List[Any] = scheduler.step_prk(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A : Any = scheduler.step_plms(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
A : str = scheduler.step_plms(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple:
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCamelCase )
A : Dict = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config(steps_offset=1 )
A : Optional[int] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
for t in [1, 5, 10]:
self.check_over_forward(time_step=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
        # earlier versions of set_timesteps() raised an error when indexing alphas with a number of inference steps that is a power of 3
A : str = 27
for scheduler_class in self.scheduler_classes:
A : Tuple = self.dummy_sample
A : List[Any] = 0.1 * sample
A : List[Any] = self.get_scheduler_config()
A : List[str] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
        # before the power-of-3 fix this would error on the first step, so we only need to run two steps
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
A : Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
        with self.assertRaises(ValueError ):
A : Union[str, Any] = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config()
A : List[str] = scheduler_class(**__lowerCamelCase )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict:
A : Optional[Any] = self.full_loop()
A : Tuple = torch.sum(torch.abs(__lowerCamelCase ) )
A : Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any:
A : Any = self.full_loop(prediction_type="v_prediction" )
A : Union[str, Any] = torch.sum(torch.abs(__lowerCamelCase ) )
A : List[str] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]:
        # We specify a different beta so that the first alpha is 0.99
        A : Any = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
A : Dict = torch.sum(torch.abs(__lowerCamelCase ) )
A : Any = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
        # We specify a different beta so that the first alpha is 0.99
        A : Any = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
A : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) )
A : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
        assert abs(result_mean.item() - 0.2434 ) < 1e-3
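# Editor's note: a hedged, standalone sketch of the two-phase PNDM sampling
# loop that the `full_loop` helper above drives: a few Runge-Kutta warm-up
# steps (`step_prk`) followed by the linear multistep phase (`step_plms`).
# The toy model below is a stand-in for a real noise-prediction network.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(
    num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)

def toy_model(sample, t):
    # stand-in for an epsilon-prediction UNet
    return 0.1 * sample

for t in scheduler.prk_timesteps:
    residual = toy_model(sample, t)
    sample = scheduler.step_prk(residual, t, sample).prev_sample

for t in scheduler.plms_timesteps:
    residual = toy_model(sample, t)
    sample = scheduler.step_plms(residual, t, sample).prev_sample

print(sample.shape)  # torch.Size([1, 3, 8, 8])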
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__SCREAMING_SNAKE_CASE = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 2048-bit
14: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 3072-bit
15: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 4096-bit
16: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 6144-bit
17: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 8192-bit
18: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
}
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : int = 14 ) -> None:
if group not in primes:
raise ValueError("Unsupported Group" )
A : Any = primes[group]["prime"]
A : str = primes[group]["generator"]
A : List[str] = int(hexlify(urandom(32 ) ) , base=16 )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> str:
return hex(self.__private_key )[2:]
def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
A : Dict = pow(self.generator , self.__private_key , self.prime )
return hex(__lowerCamelCase )[2:]
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : int ) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
            2 <= __lowerCamelCase <= self.prime - 2
and pow(__lowerCamelCase , (self.prime - 1) // 2 , self.prime ) == 1
)
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str ) -> str:
A : List[str] = int(__lowerCamelCase , base=16 )
if not self.is_valid_public_key(__lowerCamelCase ):
raise ValueError("Invalid public key" )
A : str = pow(__lowerCamelCase , self.__private_key , self.prime )
return shaaaa(str(__lowerCamelCase ).encode() ).hexdigest()
@staticmethod
def SCREAMING_SNAKE_CASE__ ( __lowerCamelCase : int , __lowerCamelCase : int ) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(__lowerCamelCase , (prime - 1) // 2 , __lowerCamelCase ) == 1
)
@staticmethod
def SCREAMING_SNAKE_CASE__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : int = 14 ) -> str:
A : Any = int(__lowerCamelCase , base=16 )
A : Tuple = int(__lowerCamelCase , base=16 )
A : Optional[Any] = primes[group]["prime"]
if not DiffieHellman.is_valid_public_key_static(__lowerCamelCase , __lowerCamelCase ):
raise ValueError("Invalid public key" )
A : Dict = pow(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return shaaaa(str(__lowerCamelCase ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
    doctest.testmod()
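# Editor's note: a hedged sketch of the key exchange the class above implements,
# reusing the RFC 3526 table defined earlier in this file. Plain names are used
# in place of the mangled method names; the 256-bit private exponents and the
# SHA-256 key derivation mirror the class's behaviour.
from hashlib import sha256
from secrets import randbits

primes = __SCREAMING_SNAKE_CASE  # bind the (name-mangled) RFC 3526 table above
p, g = primes[14]["prime"], primes[14]["generator"]

alice_private = randbits(256)
bob_private = randbits(256)
alice_public = pow(g, alice_private, p)
bob_public = pow(g, bob_private, p)

# NIST SP800-56-style sanity check on the peer's public value
assert 2 <= bob_public <= p - 2 and pow(bob_public, (p - 1) // 2, p) == 1

# both sides derive the same shared secret, then hash it into a session key
alice_shared = pow(bob_public, alice_private, p)
bob_shared = pow(alice_public, bob_private, p)
assert alice_shared == bob_shared
session_key = sha256(str(alice_shared).encode()).hexdigest()
print(session_key)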
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar), the speed of light c, and the
# value of π used by the Casimir-force function below
__SCREAMING_SNAKE_CASE = 1.0_5_4_5_7_1_8_1_7e-3_4 # unit of ℏ : J * s
__SCREAMING_SNAKE_CASE = 3e8 # unit of c : m * s^-1
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
A : int = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
A : Tuple = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
A : Dict = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
    doctest.testmod()
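# Editor's note: a hedged worked example of the relation the solver above
# inverts, F = (hbar * c * pi^2 * A) / (240 * d^4), computed directly so it
# runs independently of the (name-mangled) function signature.
from math import pi

hbar, c = 1.054571817e-34, 3e8          # J*s, m/s
area, distance = 4.0, 0.03              # m^2, m
force = (hbar * c * pi**2 * area) / (240 * distance**4)
print(f"{force:.4e} N")                 # ~6.4248e-21 N between the plates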
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str]=13 , __lowerCamelCase : Union[str, Any]=30 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[str]=32 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=4 , __lowerCamelCase : List[Any]=37 , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[str]=10 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : int=3 , __lowerCamelCase : List[str]=0.6 , __lowerCamelCase : Union[str, Any]=None , ) -> Optional[int]:
A : Tuple = parent
A : str = batch_size
A : Any = image_size
A : List[str] = patch_size
A : List[Any] = num_channels
A : Optional[int] = is_training
A : List[str] = use_labels
A : int = hidden_size
A : Any = num_hidden_layers
A : str = num_attention_heads
A : Union[str, Any] = intermediate_size
A : str = hidden_act
A : List[Any] = hidden_dropout_prob
A : int = attention_probs_dropout_prob
A : Tuple = type_sequence_label_size
A : Union[str, Any] = initializer_range
A : Optional[int] = mask_ratio
A : Union[str, Any] = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded up
# (we add 1 for the [CLS] token)
A : str = (image_size // patch_size) ** 2
A : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple:
A : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : List[str] = None
if self.use_labels:
A : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : Any ) -> Tuple:
A : List[Any] = TFViTMAEModel(config=__lowerCamelCase )
A : int = model(__lowerCamelCase , training=__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> Optional[Any]:
A : str = TFViTMAEForPreTraining(__lowerCamelCase )
A : Any = model(__lowerCamelCase , training=__lowerCamelCase )
# expected sequence length = num_patches
A : Union[str, Any] = (self.image_size // self.patch_size) ** 2
A : Dict = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A : List[Any] = 1
A : Dict = TFViTMAEForPreTraining(__lowerCamelCase )
A : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : str = model(__lowerCamelCase , training=__lowerCamelCase )
A : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
A : Tuple = self.prepare_config_and_inputs()
(A) : Optional[Any] = config_and_inputs
A : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( _A ,_A ,unittest.TestCase ):
'''simple docstring'''
a__ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
a__ = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
a__ = False
a__ = False
a__ = False
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
A : Union[str, Any] = TFViTMAEModelTester(self )
A : Any = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Optional[int] = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
A : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , tf.keras.layers.Layer ) )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : int = model_class(__lowerCamelCase )
A : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Union[str, Any] = [*signature.parameters.keys()]
A : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
# make the mask reproducible
np.random.seed(2 )
A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A : Any = int((config.image_size // config.patch_size) ** 2 )
A : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
A : Any = model_class(__lowerCamelCase )
A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
A : List[str] = model(__lowerCamelCase , noise=__lowerCamelCase )
A : Dict = copy.deepcopy(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
A : List[str] = model(**__lowerCamelCase , noise=__lowerCamelCase )
A : str = outputs_dict[0].numpy()
A : Dict = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
# make the mask reproducible
np.random.seed(2 )
A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A : int = int((config.image_size // config.patch_size) ** 2 )
A : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__lowerCamelCase : List[str] ):
A : Any = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__lowerCamelCase ):
A : int = v.numpy()
else:
A : Dict = np.array(__lowerCamelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
A : Any = model_class(__lowerCamelCase )
A : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
A : List[str] = prepare_numpy_arrays(__lowerCamelCase )
A : Optional[Any] = model(__lowerCamelCase , noise=__lowerCamelCase )
A : str = model(**__lowerCamelCase , noise=__lowerCamelCase )
self.assert_outputs_same(__lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] ) -> Optional[int]:
# make masks reproducible
np.random.seed(2 )
A : Tuple = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
A : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A : Dict = tf.constant(__lowerCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
A : Optional[Any] = tf_noise
super().check_pt_tf_models(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
# make mask reproducible
np.random.seed(2 )
A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A : Any = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__lowerCamelCase )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(__lowerCamelCase , __lowerCamelCase ),)
if isinstance(__lowerCamelCase , __lowerCamelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__lowerCamelCase , "_keras_serializable" , __lowerCamelCase )
}
A : List[str] = int((config.image_size // config.patch_size) ** 2 )
A : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A : List[Any] = tf.convert_to_tensor(__lowerCamelCase )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
A : Dict = main_layer_class(__lowerCamelCase )
A : int = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
A : Tuple = tf.keras.Model(__lowerCamelCase , outputs=main_layer(__lowerCamelCase ) )
A : Optional[Any] = model(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
A : str = os.path.join(__lowerCamelCase , "keras_model.h5" )
model.save(__lowerCamelCase )
A : str = tf.keras.models.load_model(
__lowerCamelCase , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__lowerCamelCase , tf.keras.Model )
A : str = model(__lowerCamelCase )
self.assert_outputs_same(__lowerCamelCase , __lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
# make mask reproducible
np.random.seed(2 )
A : str = self.model_tester.prepare_config_and_inputs_for_common()
A : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
A : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
A : List[str] = model_class(__lowerCamelCase )
A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
A : Tuple = model(__lowerCamelCase , noise=__lowerCamelCase )
if model_class.__name__ == "TFViTMAEModel":
A : Tuple = outputs.last_hidden_state.numpy()
A : Optional[int] = 0
else:
A : Tuple = outputs.logits.numpy()
A : str = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase , saved_model=__lowerCamelCase )
A : List[str] = model_class.from_pretrained(__lowerCamelCase )
A : Tuple = model(__lowerCamelCase , noise=__lowerCamelCase )
if model_class.__name__ == "TFViTMAEModel":
A : Dict = after_outputs["last_hidden_state"].numpy()
A : List[str] = 0
else:
A : str = after_outputs["logits"].numpy()
A : int = 0
A : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCamelCase , 1e-5 )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]:
# make mask reproducible
np.random.seed(2 )
A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A : int = int((config.image_size // config.patch_size) ** 2 )
A : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
A : Optional[int] = model_class(__lowerCamelCase )
A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
A : List[str] = model(__lowerCamelCase , noise=__lowerCamelCase )
A : List[str] = model.get_config()
            # make sure the returned config is JSON-serializable, which Keras requires
json.dumps(__lowerCamelCase )
A : List[Any] = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
A : int = model_class.from_config(model.config )
A : Union[str, Any] = new_model(__lowerCamelCase ) # Build model
new_model.set_weights(model.get_weights() )
A : str = new_model(__lowerCamelCase , noise=__lowerCamelCase )
self.assert_outputs_same(__lowerCamelCase , __lowerCamelCase )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> int:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any:
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]:
A : str = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(__lowerCamelCase )
def UpperCAmelCase ( ):
A : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
A : List[Any] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
A : Optional[int] = self.default_image_processor
A : Tuple = prepare_img()
A : Optional[int] = image_processor(images=__lowerCamelCase , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
A : Any = ViTMAEConfig()
A : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
A : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
A : Union[str, Any] = model(**__lowerCamelCase , noise=__lowerCamelCase )
# verify the logits
A : Optional[Any] = tf.convert_to_tensor([1, 1_96, 7_68] )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
A : List[str] = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
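# Editor's note: a hedged sketch (not part of the test file above) of the
# reproducibility trick these tests rely on: ViTMAE masks patches at random on
# every forward pass, so a fixed `noise` tensor with one uniform value per
# patch is passed in to pin the mask and make outputs comparable across runs.
import numpy as np
import tensorflow as tf
from transformers import TFViTMAEForPreTraining

model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
num_patches = (model.config.image_size // model.config.patch_size) ** 2

pixel_values = tf.random.uniform((1, 3, model.config.image_size, model.config.image_size))
noise = tf.constant(np.random.uniform(size=(1, num_patches)), dtype=tf.float32)

out_a = model(pixel_values, noise=noise, training=False)
out_b = model(pixel_values, noise=noise, training=False)
assert np.allclose(out_a.logits.numpy(), out_b.logits.numpy())  # same mask -> same output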
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : Optional[Any] = RobertaPreLayerNormConfig.from_pretrained(
_lowerCamelCase , architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
A : List[Any] = torch.load(hf_hub_download(repo_id=_lowerCamelCase , filename="pytorch_model.bin" ) )
A : Union[str, Any] = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("roberta." ):
A : int = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
A : Any = tensor_value
A : Optional[int] = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=_lowerCamelCase , config=_lowerCamelCase , state_dict=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
# convert tokenizer
A : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
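# Editor's note: a hedged, dependency-free distillation of the checkpoint
# surgery the converter above performs: walk the source state dict, re-prefix
# keys for the target architecture, and drop weights the target never uses.
# Names are illustrative.
def remap_state_dict(source, old_prefix="roberta.", new_prefix="roberta_prelayernorm.",
                     drop_suffixes=(".self.LayerNorm.weight", ".self.LayerNorm.bias")):
    remapped = {}
    for key, tensor in source.items():
        if key.endswith(drop_suffixes):
            continue  # present in the source checkpoint but unused by the target model
        if key.startswith(old_prefix):
            key = new_prefix + key[len(old_prefix):]
        remapped[key] = tensor
    return remapped

# {"roberta.encoder.w": 1} -> {"roberta_prelayernorm.encoder.w": 1}
print(remap_state_dict({"roberta.encoder.w": 1, "roberta.x.self.LayerNorm.bias": 2}))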
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def __A ( a_ : Any , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Tuple )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = FunnelConfig.from_json_file(a_ )
print(F"Building PyTorch model from configuration: {config}" )
SCREAMING_SNAKE_CASE : str = FunnelBaseModel(a_ ) if base_model else FunnelModel(a_ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(a_ , a_ , a_ )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , a_ )
if __name__ == "__main__":
lowerCamelCase__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
lowerCamelCase__ : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
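# Editor's note: a typical invocation of the converter above; the script name
# follows the transformers naming convention and all paths are placeholders.
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin \
#       --base_model     # optional: export the encoder without the decoder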
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but uses more memory.
lowerCamelCase__ : Optional[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __A ( a_ : str , a_ : str )-> tuple[str, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = len([g for position, g in enumerate(a_ ) if g == main_target[position]] )
return (item, float(a_ ))
def __A ( a_ : str , a_ : str )-> tuple[str, str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = random.randint(0 , len(a_ ) - 1 )
SCREAMING_SNAKE_CASE : str = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __A ( a_ : str , a_ : list[str] )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(a_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE : Any = random.choice(a_ )
return "".join(a_ )
def __A ( a_ : tuple[str, float] , a_ : list[tuple[str, float]] , a_ : list[str] , )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE : List[str] = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10 if child_n >= 10 else child_n
for _ in range(a_ ):
SCREAMING_SNAKE_CASE : List[str] = population_score[random.randint(0 , a_ )][0]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = crossover(parent_a[0] , a_ )
# Append new string to the population list.
pop.append(mutate(a_ , a_ ) )
pop.append(mutate(a_ , a_ ) )
return pop
def __A ( a_ : str , a_ : list[str] , a_ : bool = True )-> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE : List[Any] = F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(a_ )
    # Verify that the target contains no genes besides the ones in the genes list.
SCREAMING_SNAKE_CASE : List[str] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE : str = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(a_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE : Tuple = []
for _ in range(a_ ):
population.append(''''''.join([random.choice(a_ ) for i in range(len(a_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(a_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE : int = [evaluate(a_ , a_ ) for item in population]
# Check if there is a matching evolution.
        SCREAMING_SNAKE_CASE : List[Any] = sorted(a_ , key=lambda x : x[1] , reverse=True )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regressions in the evolution.
SCREAMING_SNAKE_CASE : Optional[Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(a_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE : Optional[int] = [
(item, score / len(a_ )) for item, score in population_score
]
# This is selection
for i in range(a_ ):
population.extend(select(population_score[int(a_ )] , a_ , a_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(a_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ : Dict = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ : int = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
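# Editor's note: a hedged, self-contained miniature of the evolve loop above
# (evaluate -> select -> crossover -> mutate), with plain names in place of the
# mangled ones and elitism added so the best candidate is never lost.
import random as _random  # local alias to avoid clobbering the seeded RNG above

def tiny_ga(target, genes, pop_size=100, max_generations=1000):
    def fitness(candidate):
        return sum(a == b for a, b in zip(candidate, target))

    population = ["".join(_random.choice(genes) for _ in target) for _ in range(pop_size)]
    for _ in range(max_generations):
        population.sort(key=fitness, reverse=True)
        if population[0] == target:
            break
        parents = population[: pop_size // 5]             # selection
        children = []
        while len(children) < pop_size - len(parents):
            mother, father = _random.sample(parents, 2)
            cut = _random.randrange(1, len(target))       # single-point crossover
            child = list(mother[:cut] + father[cut:])
            child[_random.randrange(len(target))] = _random.choice(genes)  # mutation
            children.append("".join(child))
        population = parents + children                   # elitism: keep the parents
    return max(population, key=fitness)

print(tiny_ga("HELLO WORLD", list(" ABCDEFGHIJKLMNOPQRSTUVWXYZ")))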
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
lowerCamelCase__ : Any = random.Random()
def __A ( a_ : int , a_ : Optional[int]=1.0 , a_ : Optional[Any]=None , a_ : Tuple=None )-> Union[str, Any]:
'''simple docstring'''
if rng is None:
SCREAMING_SNAKE_CASE : List[str] = global_rng
SCREAMING_SNAKE_CASE : List[str] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __init__( self :Dict , lowerCamelCase_ :List[Any] , lowerCamelCase_ :int=7 , lowerCamelCase_ :Optional[Any]=4_00 , lowerCamelCase_ :List[Any]=20_00 , lowerCamelCase_ :str=24 , lowerCamelCase_ :Optional[int]=24 , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :int=1_60_00 , lowerCamelCase_ :Optional[int]=True , lowerCamelCase_ :Dict=True , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Any = min_seq_length
SCREAMING_SNAKE_CASE : List[str] = max_seq_length
SCREAMING_SNAKE_CASE : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Any = feature_size
SCREAMING_SNAKE_CASE : Optional[int] = num_mel_bins
SCREAMING_SNAKE_CASE : List[str] = padding_value
SCREAMING_SNAKE_CASE : Union[str, Any] = sampling_rate
SCREAMING_SNAKE_CASE : Union[str, Any] = return_attention_mask
SCREAMING_SNAKE_CASE : str = do_normalize
def __lowerCAmelCase ( self :Optional[Any] ) -> List[Any]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :List[str]=False ) -> Union[str, Any]:
'''simple docstring'''
def _flatten(lowerCamelCase_ :Dict ):
return list(itertools.chain(*lowerCamelCase_ ) )
if equal_length:
SCREAMING_SNAKE_CASE : Any = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : Any = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(lowerCamelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = SpeechaTextFeatureExtractor if is_speech_available() else None
def __lowerCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = SpeechaTextFeatureExtractionTester(self )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Tuple ) -> str:
'''simple docstring'''
self.assertTrue(np.all(np.mean(lowerCamelCase_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase_ , axis=0 ) - 1 ) < 1E-3 ) )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE : Tuple = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE : str = feature_extractor(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
SCREAMING_SNAKE_CASE : int = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
SCREAMING_SNAKE_CASE : Any = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Tuple = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
SCREAMING_SNAKE_CASE : str = np.asarray(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
def __lowerCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE : Dict = ['''longest''', '''max_length''', '''do_not_pad''']
SCREAMING_SNAKE_CASE : Optional[Any] = [None, 16, None]
for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(
lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = inputs.input_features
SCREAMING_SNAKE_CASE : Optional[Any] = inputs.attention_mask
SCREAMING_SNAKE_CASE : Optional[Any] = [np.sum(lowerCamelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : Dict = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE : str = ['''longest''', '''max_length''', '''do_not_pad''']
SCREAMING_SNAKE_CASE : Optional[int] = [None, 16, None]
for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(
lowerCamelCase_ , max_length=lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = inputs.input_features
SCREAMING_SNAKE_CASE : str = inputs.attention_mask
SCREAMING_SNAKE_CASE : Dict = [np.sum(lowerCamelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __lowerCAmelCase ( self :List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE : int = feature_extractor(
lowerCamelCase_ , padding='''max_length''' , max_length=4 , truncation=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Dict = inputs.input_features
SCREAMING_SNAKE_CASE : Tuple = inputs.attention_mask
SCREAMING_SNAKE_CASE : Optional[Any] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def __lowerCAmelCase ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE : Dict = feature_extractor(
lowerCamelCase_ , padding='''longest''' , max_length=4 , truncation=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = inputs.input_features
SCREAMING_SNAKE_CASE : int = inputs.attention_mask
SCREAMING_SNAKE_CASE : Optional[int] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE : int = feature_extractor(
lowerCamelCase_ , padding='''longest''' , max_length=16 , truncation=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = inputs.input_features
SCREAMING_SNAKE_CASE : Union[str, Any] = inputs.attention_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length > longest -> then pad to longest
self.assertEqual(input_features.shape , (3, 6, 24) )
def __lowerCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
import torch
SCREAMING_SNAKE_CASE : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : Tuple = np.random.rand(1_00 , 32 ).astype(np.floataa )
SCREAMING_SNAKE_CASE : str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE : Dict = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
SCREAMING_SNAKE_CASE : Any = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :List[Any] ) -> Optional[Any]:
'''simple docstring'''
from datasets import load_dataset
SCREAMING_SNAKE_CASE : Optional[int] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Any = ds.sort('''id''' ).select(range(lowerCamelCase_ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
        # fmt: off
        SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([
-1.5_7_4_5, -1.7_7_1_3, -1.7_0_2_0, -1.6_0_6_9, -1.2_2_5_0, -1.1_1_0_5, -0.9_0_7_2, -0.8_2_4_1,
-1.2_3_1_0, -0.8_0_9_8, -0.3_3_2_0, -0.4_1_0_1, -0.7_9_8_5, -0.4_9_9_6, -0.8_2_1_3, -0.9_1_2_8,
-1.0_4_2_0, -1.1_2_8_6, -1.0_4_4_0, -0.7_9_9_9, -0.8_4_0_5, -1.2_2_7_5, -1.5_4_4_3, -1.4_6_2_5,
] )
# fmt: on
SCREAMING_SNAKE_CASE : List[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(lowerCamelCase_ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 5_84, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowerCamelCase_ , atol=1E-4 ) )
| 18 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] )-> Dict:
'''simple docstring'''
return max(metric_fn(a_ , a_ ) for gt in ground_truths )
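# Illustrative reading of the helper above (names assumed from the un-obfuscated
# script): it scores a prediction against every reference answer and keeps the
# best score, so matching any single ground truth suffices, e.g.
# metric_max_over_ground_truths(exact_match_score, "paris", ["Paris", "Paris, France"])
# returns the maximum of the per-reference scores.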
def __A ( a_ : List[Any] , a_ : Union[str, Any] , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Optional[Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(a_ , sep='''\t''' , header=a_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : str = ast.literal_eval(a_ )
answers.append(a_ )
else:
SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(a_ , a_ ):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_ )
fa += metric_max_over_ground_truths(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = 100.0 * em / total
SCREAMING_SNAKE_CASE : Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __A ( a_ : Any , a_ : Any , a_ : List[Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = args.k
SCREAMING_SNAKE_CASE : Tuple = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = 0
for hypo, reference in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __A ( a_ : Any , a_ : List[str] , a_ : str )-> int:
'''simple docstring'''
def strip_title(a_ : Optional[Any] ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE : Tuple = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE : Any = rag_model.rag.question_encoder(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever(
a_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Dict = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : List[Any] = [strip_title(a_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_ ) )
return provenance_strings
def __A ( a_ : List[Any] , a_ : int , a_ : str )-> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : Dict = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Any = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Tuple = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
if args.print_predictions:
for q, a in zip(a_ , a_ ):
logger.info('''Q: {} - A: {}'''.format(a_ , a_ ) )
return answers
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_ )
SCREAMING_SNAKE_CASE : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(a_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(a_ , **a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a_ , retriever=a_ , **a_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(a_ , **a_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE : Dict = []
for line in tqdm(a_ ):
questions.append(line.strip() )
if len(a_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) )
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = get_args()
main(args)
| 18 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : List[Any] = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """distilbert"""
UpperCamelCase = {
"""hidden_size""": """dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
}
def __init__( self :Any , lowerCamelCase_ :Tuple=3_05_22 , lowerCamelCase_ :Optional[Any]=5_12 , lowerCamelCase_ :Dict=False , lowerCamelCase_ :str=6 , lowerCamelCase_ :Tuple=12 , lowerCamelCase_ :int=7_68 , lowerCamelCase_ :Optional[Any]=4 * 7_68 , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :Union[str, Any]=0.0_2 , lowerCamelCase_ :List[str]=0.1 , lowerCamelCase_ :Dict=0.2 , lowerCamelCase_ :Union[str, Any]=0 , **lowerCamelCase_ :List[str] , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = sinusoidal_pos_embds
SCREAMING_SNAKE_CASE : List[Any] = n_layers
SCREAMING_SNAKE_CASE : Dict = n_heads
SCREAMING_SNAKE_CASE : List[Any] = dim
SCREAMING_SNAKE_CASE : str = hidden_dim
SCREAMING_SNAKE_CASE : List[str] = dropout
SCREAMING_SNAKE_CASE : Dict = attention_dropout
SCREAMING_SNAKE_CASE : Tuple = activation
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = qa_dropout
SCREAMING_SNAKE_CASE : str = seq_classif_dropout
super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_ )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Tuple ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 18 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {"vocab_file": "vocab.json"}
lowerCamelCase__ : Dict = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCamelCase__ : Optional[Any] = {"mgp-str": 27}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[int]="[GO]" , lowerCamelCase_ :int="[GO]" , lowerCamelCase_ :str="[s]" , lowerCamelCase_ :Dict="[GO]" , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
super().__init__(
unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : int = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in self.vocab.items()}
@property
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
return len(self.vocab )
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for s in text:
char_tokens.extend(s )
return char_tokens
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCamelCase_ ) )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' )
return (vocab_file,)
| 18 | 1 |
"""simple docstring"""
from __future__ import annotations
def __A ( a_ : float , a_ : float , a_ : float , )-> tuple[str, float]:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
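# Worked example for the function above (assuming the original name
# shear_stress): exactly one of the three quantities must be 0, and that is the
# one solved for, e.g. tangential_force=100 N over area=20 m**2 with stress=0
# yields ("stress", 5.0).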
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :Optional[int] , lowerCamelCase_ :Union[str, Any]=5_02_67 , lowerCamelCase_ :int=50_00_00 , lowerCamelCase_ :Tuple=7_68 , lowerCamelCase_ :List[str]=2_56 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :Optional[Any]=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :List[str]=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Tuple=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Any=0 , lowerCamelCase_ :str=2 , **lowerCamelCase_ :List[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[str] = entity_vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = entity_emb_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
| 18 | 1 |
"""simple docstring"""
from __future__ import annotations
def __A ( a_ : int , a_ : int )-> list[list[int]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : list[list[int]] = []
create_all_state(1 , a_ , a_ , [] , a_ )
return result
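# Worked example (assuming the original name generate_all_combinations):
# generate_all_combinations(n=4, k=2) enumerates, in lexicographic order,
# [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]].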
def __A ( a_ : int , a_ : int , a_ : int , a_ : list[int] , a_ : list[list[int]] , )-> None:
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(a_ , total_number - level + 2 ):
current_list.append(a_ )
create_all_state(i + 1 , a_ , level - 1 , a_ , a_ )
current_list.pop()
def __A ( a_ : list[list[int]] )-> None:
'''simple docstring'''
for i in total_list:
print(*a_ )
if __name__ == "__main__":
lowerCamelCase__ : Optional[int] = 4
lowerCamelCase__ : Tuple = 2
lowerCamelCase__ : Tuple = generate_all_combinations(n, k)
print_all_state(total_list)
| 18 |
"""simple docstring"""
def __A ( a_ : list , a_ : int , a_ : int = 0 , a_ : int = 0 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = right or len(a_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a_ , a_ , left + 1 , right - 1 )
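# Worked example for the two-ended recursive search above (assuming the
# original name search): search([0, 5, 7, 10, 15], 7) checks both ends, then
# recurses on indices [1, 3] and [2, 2], returning index 2; a missing key,
# e.g. search([1, 2, 3], 9), bottoms out with left > right and returns -1.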
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : int = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """maskformer-swin"""
UpperCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase_ :List[Any]=2_24 , lowerCamelCase_ :Tuple=4 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :List[str]=96 , lowerCamelCase_ :int=[2, 2, 6, 2] , lowerCamelCase_ :Union[str, Any]=[3, 6, 12, 24] , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=4.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=0.0 , lowerCamelCase_ :Any=0.0 , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :Any=1E-5 , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :List[str]=None , **lowerCamelCase_ :Union[str, Any] , ) -> Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim
SCREAMING_SNAKE_CASE : List[Any] = depths
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = num_heads
SCREAMING_SNAKE_CASE : Any = window_size
SCREAMING_SNAKE_CASE : List[str] = mlp_ratio
SCREAMING_SNAKE_CASE : str = qkv_bias
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = drop_path_rate
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Any = use_absolute_embeddings
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : int = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
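# e.g. with the defaults above (embed_dim=96, depths=[2, 2, 6, 2], so 4 stages):
# hidden_size = 96 * 2 ** 3 = 768 channels after the last stage.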
SCREAMING_SNAKE_CASE : Dict = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
| 18 |
"""simple docstring"""
def __A ( a_ : int )-> list[int]:
'''simple docstring'''
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
SCREAMING_SNAKE_CASE : Optional[int] = [True] * (num + 1)
SCREAMING_SNAKE_CASE : Optional[Any] = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , a_ ):
SCREAMING_SNAKE_CASE : Any = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
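# Worked example (assuming the original name prime_sieve_eratosthenes):
# for num=10 the sieve strikes out 4, 6, 8, 10 (multiples of 2) and 9
# (a multiple of 3), leaving [2, 3, 5, 7].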
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ : str = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 18 | 1 |
"""simple docstring"""
from math import factorial
def __A ( a_ : int = 20 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
SCREAMING_SNAKE_CASE : Dict = n // 2
return int(factorial(a_ ) / (factorial(a_ ) * factorial(n - k )) )
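# Worked example: the count above is the central binomial coefficient C(2n, n).
# For n=2, factorial(4) / (factorial(2) * factorial(2)) = 24 / 4 = 6 routes
# through a 2x2 grid; for the default n=20 it evaluates to 137846528820.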
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
lowerCamelCase__ : List[str] = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 18 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Optional[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 | 1 |
"""simple docstring"""
import functools
def __A ( a_ : str , a_ : str )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = len(a_ )
SCREAMING_SNAKE_CASE : Tuple = len(a_ )
@functools.cache
def min_distance(a_ : int , a_ : int ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
SCREAMING_SNAKE_CASE : Tuple = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , a_ ) , 1 + min_distance(a_ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
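# Worked example for the memoized recursion above: each call takes the cheapest
# of delete-from-word1, delete-from-word2, or substitute (cost 0 when the
# letters already match), e.g. the distance between "kitten" and "sitting"
# is 3 (two substitutions and one insertion).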
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
"""simple docstring"""
import os
import sys
lowerCamelCase__ : List[Any] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase__ : str = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __A ( *a_ : Any , **a_ : Union[str, Any] )-> Dict:
'''simple docstring'''
return AutoConfig.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __A ( *a_ : str , **a_ : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModel.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> Dict:
'''simple docstring'''
return AutoModel.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __A ( *a_ : Any , **a_ : Tuple )-> Dict:
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __A ( *a_ : Dict , **a_ : Optional[Any] )-> Optional[int]:
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __A ( *a_ : Optional[int] , **a_ : str )-> Optional[int]:
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> List[Any]:
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*a_ , **a_ )
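# Illustrative torch.hub-style usage of the entry points above (the repo path
# and entry-point name are assumptions here, since the function names are
# obfuscated in this dump):
# model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")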
| 18 | 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCamelCase__ : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCamelCase__ : list[int] = [ord(letter) for letter in string.ascii_lowercase]
lowerCamelCase__ : set[int] = {ord(char) for char in VALID_CHARS}
lowerCamelCase__ : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def __A ( a_ : list[int] , a_ : tuple[int, ...] )-> str | None:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ""
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : int
for keychar, cipherchar in zip(cycle(a_ ) , a_ ):
SCREAMING_SNAKE_CASE : List[Any] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(a_ )
return decoded
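# Note on the decode step above: XOR is self-inverse, so for any byte c and key
# byte k, (c ^ k) ^ k == c; applying this routine (original name presumably
# try_key) with the correct key to the ciphertext recovers the plaintext exactly.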
def __A ( a_ : list[int] )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : list[str] = []
for key in product(a_ , repeat=3 ):
SCREAMING_SNAKE_CASE : Optional[Any] = try_key(a_ , a_ )
if encoded is not None:
possibles.append(a_ )
return possibles
def __A ( a_ : list[str] , a_ : str )-> list[str]:
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def __A ( a_ : str = "p059_cipher.txt" )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : list[int]
SCREAMING_SNAKE_CASE : list[str]
SCREAMING_SNAKE_CASE : str
SCREAMING_SNAKE_CASE : str
SCREAMING_SNAKE_CASE : str = Path(a_ ).parent.joinpath(a_ ).read_text(encoding='''utf-8''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = [int(a_ ) for number in data.strip().split(''',''' )]
SCREAMING_SNAKE_CASE : List[Any] = filter_valid_chars(a_ )
for common_word in COMMON_WORDS:
SCREAMING_SNAKE_CASE : Any = filter_common_word(a_ , a_ )
if len(a_ ) == 1:
break
SCREAMING_SNAKE_CASE : Tuple = possibles[0]
return sum(ord(a_ ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 18 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """encodec"""
def __init__( self :List[str] , lowerCamelCase_ :Tuple=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , lowerCamelCase_ :str=2_40_00 , lowerCamelCase_ :Any=1 , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :Optional[int]=None , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :str=1_28 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :int=1 , lowerCamelCase_ :Dict=[8, 5, 4, 2] , lowerCamelCase_ :List[Any]="weight_norm" , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=7 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Optional[int]="reflect" , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Dict=1.0 , lowerCamelCase_ :Any=10_24 , lowerCamelCase_ :str=None , lowerCamelCase_ :Union[str, Any]=True , **lowerCamelCase_ :Optional[int] , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = target_bandwidths
SCREAMING_SNAKE_CASE : List[str] = sampling_rate
SCREAMING_SNAKE_CASE : Tuple = audio_channels
SCREAMING_SNAKE_CASE : Tuple = normalize
SCREAMING_SNAKE_CASE : str = chunk_length_s
SCREAMING_SNAKE_CASE : List[str] = overlap
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_filters
SCREAMING_SNAKE_CASE : Tuple = num_residual_layers
SCREAMING_SNAKE_CASE : List[Any] = upsampling_ratios
SCREAMING_SNAKE_CASE : Optional[int] = norm_type
SCREAMING_SNAKE_CASE : Any = kernel_size
SCREAMING_SNAKE_CASE : Union[str, Any] = last_kernel_size
SCREAMING_SNAKE_CASE : Tuple = residual_kernel_size
SCREAMING_SNAKE_CASE : Any = dilation_growth_rate
SCREAMING_SNAKE_CASE : Optional[int] = use_causal_conv
SCREAMING_SNAKE_CASE : str = pad_mode
SCREAMING_SNAKE_CASE : List[Any] = compress
SCREAMING_SNAKE_CASE : Optional[Any] = num_lstm_layers
SCREAMING_SNAKE_CASE : Dict = trim_right_ratio
SCREAMING_SNAKE_CASE : List[Any] = codebook_size
SCREAMING_SNAKE_CASE : Union[str, Any] = codebook_dim if codebook_dim is not None else hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
super().__init__(**lowerCamelCase_ )
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __lowerCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
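# e.g. with the 24 kHz defaults above (upsampling_ratios=[8, 5, 4, 2]):
# hop_length = 8 * 5 * 4 * 2 = 320 samples, so frame_rate = ceil(24000 / 320)
# = 75 frames per second.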
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 18 | 1 |
"""simple docstring"""
def __A ( a_ : bytes )-> str:
'''simple docstring'''
return "".join([hex(a_ )[2:].zfill(2 ).upper() for byte in list(a_ )] )
def __A ( a_ : str )-> bytes:
'''simple docstring'''
if (len(a_ ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:\nData does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(a_ ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(a_ ) , 2 ) )
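# Worked example (assuming the original names b16encode / b16decode):
# b16encode(b"Hello") == "48656C6C6F", and decoding pairs the hex digits back
# into bytes, so b16decode("48656C6C6F") == b"Hello".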
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :str=7 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :int=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Optional[Any]=99 , lowerCamelCase_ :Any=36 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :str=4 , lowerCamelCase_ :Tuple=37 , lowerCamelCase_ :Optional[int]="gelu" , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :int=6 , lowerCamelCase_ :str=6 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :Tuple=10_00 , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : Tuple = text_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Dict = use_input_mask
SCREAMING_SNAKE_CASE : Any = use_token_type_ids
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = coordinate_size
SCREAMING_SNAKE_CASE : List[Any] = shape_size
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE : List[str] = scope
SCREAMING_SNAKE_CASE : Optional[int] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE : str = text_seq_length
SCREAMING_SNAKE_CASE : int = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE : Optional[Any] = self.text_seq_length + self.image_seq_length
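# e.g. with the tester defaults above (image_size=4, patch_size=2,
# text_seq_length=7): image_seq_length = (4 // 2) ** 2 + 1 = 5 and
# seq_length = 7 + 5 = 12, which is the sequence length the model tests
# below assert against.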
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE : str = bbox[i, j, 3]
SCREAMING_SNAKE_CASE : List[str] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE : Any = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 2]
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = t
SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# text + image
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , pixel_values=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : int = LayoutLMvaForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
return True
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = LayoutLMvaModelTester(self )
SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[str] , lowerCamelCase_ :str=False ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(lowerCamelCase_ )
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCamelCase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCamelCase_ , )
return inputs_dict
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : str = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __A ( )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :str ) -> int:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase_ ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).pixel_values.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[1, 2]] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
SCREAMING_SNAKE_CASE : Tuple = model(
input_ids=input_ids.to(lowerCamelCase_ ) , bbox=bbox.to(lowerCamelCase_ ) , pixel_values=pixel_values.to(lowerCamelCase_ ) , )
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 18 | 1 |
"""simple docstring"""
import math
def __A ( a_ : int )-> bool:
'''simple docstring'''
return math.sqrt(a_ ) * math.sqrt(a_ ) == num
def __A ( a_ : int )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[int] = n
while left <= right:
SCREAMING_SNAKE_CASE : List[str] = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
SCREAMING_SNAKE_CASE : Optional[Any] = mid - 1
else:
SCREAMING_SNAKE_CASE : List[Any] = mid + 1
return False
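# Worked example for the binary-search variant above: for n=16 the midpoints
# visited are 8, 3, 5, 4, and 4**2 == 16 returns True; n=15 exhausts the
# interval and returns False. Unlike the float sqrt check above, the integer
# search cannot suffer floating-point rounding for very large n.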
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
lowerCamelCase__ : Any = logging.getLogger(__name__)
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
UpperCamelCase = field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
UpperCamelCase = field(
default=10_24 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the training data."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the validation data."""} )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the test data."""} )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
SCREAMING_SNAKE_CASE : Optional[int] = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
UpperCamelCase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def __A ( )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(a_ )
datasets.utils.logging.set_verbosity(a_ )
transformers.utils.logging.set_verbosity(a_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
SCREAMING_SNAKE_CASE : Any = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
SCREAMING_SNAKE_CASE : List[Any] = data_args.train_file.split('''.''' )[-1]
SCREAMING_SNAKE_CASE : Optional[int] = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
SCREAMING_SNAKE_CASE : str = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
SCREAMING_SNAKE_CASE : int = load_dataset('''csv''' , data_files=a_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
SCREAMING_SNAKE_CASE : Tuple = load_dataset('''json''' , data_files=a_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
SCREAMING_SNAKE_CASE : str = raw_datasets['''train'''].features['''label'''].names
SCREAMING_SNAKE_CASE : Union[str, Any] = len(a_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
SCREAMING_SNAKE_CASE : Dict = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=a_ , )
SCREAMING_SNAKE_CASE : List[Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
SCREAMING_SNAKE_CASE : Optional[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
SCREAMING_SNAKE_CASE : Tuple = {'''Refused''': 0, '''Entailed''': 1}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
SCREAMING_SNAKE_CASE : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(a_ : str ):
# Tokenize the texts
def _convert_table_text_to_pandas(_table_text: str ):
    _table_content = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
    _table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
    return _table_pd
SCREAMING_SNAKE_CASE : List[Any] = examples['''statement''']
SCREAMING_SNAKE_CASE : Optional[int] = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
SCREAMING_SNAKE_CASE : Any = tokenizer(a_ , a_ , padding=a_ , max_length=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : List[Any] = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = raw_datasets.map(
a_ , batched=a_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : Any = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
SCREAMING_SNAKE_CASE : Tuple = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
SCREAMING_SNAKE_CASE : Optional[int] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(a_ ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(a_ : EvalPrediction ):
SCREAMING_SNAKE_CASE : str = p.predictions[0] if isinstance(p.predictions , a_ ) else p.predictions
SCREAMING_SNAKE_CASE : Tuple = np.argmax(a_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = default_data_collator
elif training_args.fpaa:
SCREAMING_SNAKE_CASE : Union[str, Any] = DataCollatorWithPadding(a_ , pad_to_multiple_of=8 )
else:
SCREAMING_SNAKE_CASE : List[Any] = None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Optional[Any] = Trainer(
model=a_ , args=a_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=a_ , tokenizer=a_ , data_collator=a_ , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : List[str] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : str = last_checkpoint
SCREAMING_SNAKE_CASE : str = trainer.train(resume_from_checkpoint=a_ )
SCREAMING_SNAKE_CASE : Optional[int] = train_result.metrics
SCREAMING_SNAKE_CASE : int = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ )
)
SCREAMING_SNAKE_CASE : Optional[int] = min(a_ , len(a_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , a_ )
trainer.save_metrics('''train''' , a_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE : Tuple = trainer.evaluate(eval_dataset=a_ )
SCREAMING_SNAKE_CASE : str = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = min(a_ , len(a_ ) )
trainer.log_metrics('''eval''' , a_ )
trainer.save_metrics('''eval''' , a_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
SCREAMING_SNAKE_CASE : Optional[Any] = predict_dataset.remove_columns('''label''' )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.predict(a_ , metric_key_prefix='''predict''' ).predictions
SCREAMING_SNAKE_CASE : Union[str, Any] = np.argmax(a_ , axis=1 )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(a_ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(a_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = label_list[item]
writer.write(F"{index}\t{item}\n" )
SCREAMING_SNAKE_CASE : Optional[int] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**a_ )
else:
trainer.create_model_card(**a_ )
def __A ( a_ : List[str] )-> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
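# Hedged standalone sketch (not part of the original script) of the TabFact
# "table_text" layout consumed by `_convert_table_text_to_pandas` above:
# '#'-delimited cells, '\n'-delimited rows, first row is the header. The example
# data below is illustrative only.
def _demo_convert_table_text():
    import pandas as pd

    table_text = "city#population\nparis#2.1m\nberlin#3.6m"
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    table = pd.DataFrame.from_records(rows[1:], columns=rows[0])
    assert list(table.columns) == ["city", "population"]
    return table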
| 18 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Any = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """encoder-decoder"""
UpperCamelCase = True
def __init__( self :Optional[int] , **lowerCamelCase_ :Optional[int] ) -> List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
SCREAMING_SNAKE_CASE : int = kwargs.pop('''encoder''' )
SCREAMING_SNAKE_CASE : Dict = encoder_config.pop('''model_type''' )
SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop('''decoder''' )
SCREAMING_SNAKE_CASE : List[str] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
SCREAMING_SNAKE_CASE : str = AutoConfig.for_model(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = AutoConfig.for_model(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = True
@classmethod
def __lowerCAmelCase ( cls :int , lowerCamelCase_ :PretrainedConfig , lowerCamelCase_ :PretrainedConfig , **lowerCamelCase_ :Optional[Any] ) -> PretrainedConfig:
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : List[str] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : int = self.encoder.to_dict()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.decoder.to_dict()
SCREAMING_SNAKE_CASE : Dict = self.__class__.model_type
return output
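# Hedged usage sketch (added; assumes the upstream equivalents of this class,
# EncoderDecoderConfig and BertConfig, are importable from transformers):
def _demo_encoder_decoder_round_trip():
    from transformers import BertConfig, EncoderDecoderConfig

    encoder = BertConfig(hidden_size=128)
    decoder = BertConfig(hidden_size=128)
    config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
    # to_dict() carries nested "encoder"/"decoder" dicts plus model_type "encoder-decoder"
    return config.to_dict()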
| 18 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any=13 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :int=[1, 2, 1] , lowerCamelCase_ :str=[2, 2, 4] , lowerCamelCase_ :str=2 , lowerCamelCase_ :Tuple=2.0 , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :Optional[int]=0.0 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :str=False , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :List[Any]=1E-5 , lowerCamelCase_ :int=True , lowerCamelCase_ :str=None , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Union[str, Any]=10 , lowerCamelCase_ :List[Any]=8 , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Any = embed_dim
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : List[str] = num_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = window_size
SCREAMING_SNAKE_CASE : Optional[Any] = mlp_ratio
SCREAMING_SNAKE_CASE : List[Any] = qkv_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = drop_path_rate
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Any = patch_norm
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : List[Any] = scope
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_stride
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : List[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SwinvaModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowerCamelCase_ , embed_dim=37 )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
SCREAMING_SNAKE_CASE : Tuple = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Optional[int] = config.window_size**2
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
SCREAMING_SNAKE_CASE : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
SCREAMING_SNAKE_CASE : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# Swinv2 has a different seq_length
SCREAMING_SNAKE_CASE : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
SCREAMING_SNAKE_CASE : Any = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = reshaped_hidden_states[0].shape
SCREAMING_SNAKE_CASE : Optional[int] = (
reshaped_hidden_states[0].view(lowerCamelCase_ , lowerCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Tuple = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
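# Hedged worked example (added note) of the shape arithmetic checked by the model
# tester above, using its defaults (image_size=32, patch_size=2, embed_dim=16,
# depths=[1, 2, 1]):
#   expected_seq_len = ((32 // 2) ** 2) // (4 ** (3 - 1)) = 256 // 16 = 16
#   expected_dim     = 16 * 2 ** (3 - 1)                  = 64
# so last_hidden_state is asserted to have shape (batch_size, 16, 64).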
| 18 | 1 |
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
lowerCamelCase__ : List[str] = False
lowerCamelCase__ : str = False
def __A ( a_ : Namespace )-> Tuple:
'''simple docstring'''
return TrainCommand(a_ )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@staticmethod
def __lowerCAmelCase ( lowerCamelCase_ :ArgumentParser ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
train_parser.add_argument(
'''--train_data''' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=lowerCamelCase_ , default=0 , help='''Column of the dataset csv file with example labels.''' )
train_parser.add_argument(
'''--column_text''' , type=lowerCamelCase_ , default=1 , help='''Column of the dataset csv file with example texts.''' )
train_parser.add_argument(
'''--column_id''' , type=lowerCamelCase_ , default=2 , help='''Column of the dataset csv file with example ids.''' )
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
train_parser.add_argument('''--validation_data''' , type=lowerCamelCase_ , default='''''' , help='''path to validation dataset.''' )
train_parser.add_argument(
'''--validation_split''' , type=lowerCamelCase_ , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=lowerCamelCase_ , default='''./''' , help='''path to saved the trained model.''' )
train_parser.add_argument(
'''--task''' , type=lowerCamelCase_ , default='''text_classification''' , help='''Task to train the model on.''' )
train_parser.add_argument(
'''--model''' , type=lowerCamelCase_ , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
train_parser.add_argument('''--train_batch_size''' , type=lowerCamelCase_ , default=32 , help='''Batch size for training.''' )
train_parser.add_argument('''--valid_batch_size''' , type=lowerCamelCase_ , default=64 , help='''Batch size for validation.''' )
train_parser.add_argument('''--learning_rate''' , type=lowerCamelCase_ , default=3E-5 , help='''Learning rate.''' )
train_parser.add_argument('''--adam_epsilon''' , type=lowerCamelCase_ , default=1E-08 , help='''Epsilon for Adam optimizer.''' )
train_parser.set_defaults(func=lowerCamelCase_ )
def __init__( self :Union[str, Any] , lowerCamelCase_ :Namespace ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger('''transformers-cli/training''' )
SCREAMING_SNAKE_CASE : Optional[int] = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = args.output
SCREAMING_SNAKE_CASE : List[str] = args.column_label
SCREAMING_SNAKE_CASE : Dict = args.column_text
SCREAMING_SNAKE_CASE : Dict = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}" )
if args.task == "text_classification":
SCREAMING_SNAKE_CASE : Dict = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}" )
SCREAMING_SNAKE_CASE : Any = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE : Dict = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}" )
SCREAMING_SNAKE_CASE : List[Any] = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE : Any = args.validation_split
SCREAMING_SNAKE_CASE : Union[str, Any] = args.train_batch_size
SCREAMING_SNAKE_CASE : List[Any] = args.valid_batch_size
SCREAMING_SNAKE_CASE : List[str] = args.learning_rate
SCREAMING_SNAKE_CASE : Union[str, Any] = args.adam_epsilon
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
raise NotImplementedError
def __lowerCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
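# Hedged usage sketch (added; the file names are placeholders, and only flags that
# the argument parser above actually registers are used):
#   transformers-cli train --train_data train.csv --column_label 0 --column_text 1 \
#       --column_id 2 --model bert-base-uncased --task text_classification --output ./out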
| 18 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """blenderbot-small"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :Any , lowerCamelCase_ :Dict=5_02_65 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :Tuple=8 , lowerCamelCase_ :int=20_48 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=8 , lowerCamelCase_ :str=20_48 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :Tuple=0.0 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Dict=False , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=2 , **lowerCamelCase_ :Dict , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = d_model
SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Tuple = encoder_layers
SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Any = activation_dropout
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , forced_eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch'''}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __lowerCAmelCase ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Any = super().outputs
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : str = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __lowerCAmelCase ( self :int , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE : Optional[int] = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE : str = dict(**lowerCamelCase_ , **lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = common_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE : str = common_inputs['''decoder_input_ids'''].shape[1]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.num_attention_heads
SCREAMING_SNAKE_CASE : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE : List[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE : int = min(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = max(lowerCamelCase_ , lowerCamelCase_ ) - min_num_layers
SCREAMING_SNAKE_CASE : Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowerCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE : int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowerCamelCase_ , lowerCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) )
return common_inputs
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : List[str] = seqlen + 2
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.num_layers
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Tuple = common_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE : Any = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ , dtype=lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(lowerCamelCase_ )
]
return common_inputs
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : int = tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE : Any = dict(tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ ) )
return common_inputs
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
return common_inputs
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Optional[Any] = super()._flatten_past_key_values_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self )._flatten_past_key_values_(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
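# Hedged worked example (added note) of the past_key_values shape arithmetic in the
# dummy-input helpers above. With the config defaults encoder_attention_heads=16 and
# d_model=512, the ONNX fixed batch of 2, and taking encoder_seq_length = 8 for
# illustration, each encoder-side past key/value tensor has shape
#   (2, 16, 8, 512 // 16) == (2, 16, 8, 32)
# and the decoder attention mask is extended to cover
#   decoder_past_length = decoder_seq_length + 3 positions.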
| 18 | 1 |
"""simple docstring"""
from math import sqrt
def solution(limit: int = 1_00_00_00) -> int:
    '''simple docstring'''
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
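# Worked example behind solution() (added note, not in the original file): the
# classic 6 x 5 x 3 cuboid has shortest surface path sqrt(6**2 + (5 + 3)**2) = 10,
# an integer, so it is counted. solution(limit) returns the smallest maximal side
# M at which the number of such integer-shortest-path cuboids fitting in an
# M-sided box first exceeds `limit`.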
| 18 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """layoutlmv3"""
def __init__( self :str , lowerCamelCase_ :Optional[Any]=5_02_65 , lowerCamelCase_ :Dict=7_68 , lowerCamelCase_ :Union[str, Any]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Union[str, Any]=30_72 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Any=5_12 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=1E-5 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :int=0 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :List[str]=10_24 , lowerCamelCase_ :Tuple=1_28 , lowerCamelCase_ :Any=1_28 , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :str=32 , lowerCamelCase_ :int=1_28 , lowerCamelCase_ :int=64 , lowerCamelCase_ :List[Any]=2_56 , lowerCamelCase_ :Any=True , lowerCamelCase_ :str=True , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :List[str]=2_24 , lowerCamelCase_ :Dict=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :Any=None , **lowerCamelCase_ :Optional[Any] , ) -> int:
'''simple docstring'''
super().__init__(
vocab_size=lowerCamelCase_ , hidden_size=lowerCamelCase_ , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , intermediate_size=lowerCamelCase_ , hidden_act=lowerCamelCase_ , hidden_dropout_prob=lowerCamelCase_ , attention_probs_dropout_prob=lowerCamelCase_ , max_position_embeddings=lowerCamelCase_ , type_vocab_size=lowerCamelCase_ , initializer_range=lowerCamelCase_ , layer_norm_eps=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : Tuple = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : int = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Dict = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Optional[int] = text_embed
SCREAMING_SNAKE_CASE : Any = visual_embed
SCREAMING_SNAKE_CASE : Any = input_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : str = classifier_dropout
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.12""" )
@property
def __lowerCAmelCase ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __lowerCAmelCase ( self :Optional[int] ) -> float:
'''simple docstring'''
return 1E-5
@property
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
return 12
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :"ProcessorMixin" , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional["TensorType"] = None , lowerCamelCase_ :int = 3 , lowerCamelCase_ :int = 40 , lowerCamelCase_ :int = 40 , ) -> Mapping[str, Any]:
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Dict = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : int = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = dict(
processor(
lowerCamelCase_ , text=lowerCamelCase_ , boxes=lowerCamelCase_ , return_tensors=lowerCamelCase_ , ) )
return inputs
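# Added sketch: the `compute_effective_axis_dimension` helper used above, restated
# standalone so the dummy-input sizing is visible. This mirrors the upstream
# `transformers.onnx` utility as commonly implemented; treat the exact name and
# signature as an assumption rather than a guarantee for any particular release.
def compute_effective_axis_dimension_sketch(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    # A dynamic axis is encoded as -1: fall back to a small fixed size so ONNX
    # tracing cannot specialize on it, then reserve room for special tokens.
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add
# e.g. a dynamic sequence axis with 2 special tokens to add:
# compute_effective_axis_dimension_sketch(-1, fixed_dimension=8, num_token_to_add=2) == 6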
| 18 | 1 |
"""simple docstring"""
lowerCamelCase__ : Optional[int] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
lowerCamelCase__ : Dict = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def __A ( a_ : dict[int, list[int]] , a_ : int , a_ : list[bool] )-> list[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Dict = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(a_ , a_ , a_ )
order.append(a_ )
return order
def __A ( a_ : dict[int, list[int]] , a_ : int , a_ : list[bool] )-> list[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : str = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(a_ , a_ , a_ )
return component
def __A ( a_ : dict[int, list[int]] )-> list[list[int]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = len(a_ ) * [False]
SCREAMING_SNAKE_CASE : dict[int, list[int]] = {vert: [] for vert in range(len(a_ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i, was_visited in enumerate(a_ ):
if not was_visited:
order += topology_sort(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Union[str, Any] = len(a_ ) * [False]
for i in range(len(a_ ) ):
SCREAMING_SNAKE_CASE : Union[str, Any] = order[len(a_ ) - i - 1]
if not visited[vert]:
SCREAMING_SNAKE_CASE : str = find_components(a_ , a_ , a_ )
components_list.append(a_ )
return components_list
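# Added reference: a readable version of the two-pass (Kosaraju) strongly-connected-
# components routine above. The mangled file collapses several locals into one
# identifier, so names like `graph`, `order`, `components` are assumptions in this
# hedged reconstruction, not the dataset's canonical form. Vertices are assumed to
# be numbered 0..n-1, as in the test graphs at the top of the file.
def kosaraju_scc(graph: dict) -> list:
    n = len(graph)
    visited = [False] * n
    order = []

    def dfs_order(v: int) -> None:
        # First pass: record vertices in order of completion (reverse topological).
        visited[v] = True
        for w in graph[v]:
            if not visited[w]:
                dfs_order(w)
        order.append(v)

    # Build the reversed graph once, up front.
    reversed_graph = {v: [] for v in graph}
    for v, neighbours in graph.items():
        for w in neighbours:
            reversed_graph[w].append(v)

    for v in range(n):
        if not visited[v]:
            dfs_order(v)

    visited = [False] * n
    components = []

    def dfs_collect(v: int, component: list) -> None:
        # Second pass on the reversed graph: everything reachable from `v`
        # belongs to the same strongly connected component.
        visited[v] = True
        component.append(v)
        for w in reversed_graph[v]:
            if not visited[w]:
                dfs_collect(w, component)

    for v in reversed(order):
        if not visited[v]:
            component = []
            dfs_collect(v, component)
            components.append(component)
    return components

# e.g. kosaraju_scc({0: [1], 1: [2], 2: [0, 3], 3: []}) -> [[0, 2, 1], [3]]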
| 18 |
"""simple docstring"""
import math
def __A ( a_ : list , a_ : int )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = len(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = int(math.floor(math.sqrt(a_ ) ) )
SCREAMING_SNAKE_CASE : List[str] = 0
while arr[min(a_ , a_ ) - 1] < x:
SCREAMING_SNAKE_CASE : Optional[Any] = step
step += int(math.floor(math.sqrt(a_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
SCREAMING_SNAKE_CASE : Any = prev + 1
if prev == min(a_ , a_ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ : List[str] = [int(item) for item in user_input.split(",")]
lowerCamelCase__ : Dict = int(input("Enter the number to be searched:\n"))
lowerCamelCase__ : Tuple = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f'''Number {x} is at index {res}''')
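# Added reference: a compact restatement of the jump-search routine above with the
# collapsed variable names spelled out (`block`, `prev`, `step` are the assumed
# originals). Jump search probes a sorted list in sqrt(n)-sized blocks, then scans
# linearly inside the one block that could contain the target.
import math

def jump_search_sketch(arr: list, target) -> int:
    if not arr:
        return -1
    n = len(arr)
    block = int(math.sqrt(n)) or 1
    step, prev = block, 0
    # Jump ahead block by block until the block's last element reaches the target.
    while arr[min(step, n) - 1] < target:
        prev = step
        step += block
        if prev >= n:
            return -1
    # Linear scan inside the identified block.
    while prev < min(step, n):
        if arr[prev] == target:
            return prev
        prev += 1
    return -1

assert jump_search_sketch([1, 3, 5, 7, 9, 11], 9) == 4
assert jump_search_sketch([1, 3, 5, 7, 9, 11], 4) == -1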
| 18 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__:
'''simple docstring'''
def __init__( self :Any , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Union[str, Any]=13 , lowerCamelCase_ :Tuple=7 , lowerCamelCase_ :Optional[int]=True , lowerCamelCase_ :str=True , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[Any]=99 , lowerCamelCase_ :int=32 , lowerCamelCase_ :str=5 , lowerCamelCase_ :Dict=4 , lowerCamelCase_ :Any=37 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :Optional[Any]=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Tuple=1_28 , lowerCamelCase_ :List[Any]=32 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Dict=2 , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :Tuple=3 , lowerCamelCase_ :List[str]=4 , lowerCamelCase_ :List[Any]=None , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Dict = seq_length
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : Tuple = use_input_mask
SCREAMING_SNAKE_CASE : int = use_token_type_ids
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Any = num_labels
SCREAMING_SNAKE_CASE : str = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Dict = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Dict = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()

SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = NezhaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :Dict , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : str = NezhaModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : str = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = NezhaForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : str = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = NezhaForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : str = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = NezhaForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , next_sentence_label=lowerCamelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :int , lowerCamelCase_ :Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = NezhaForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = NezhaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Any = NezhaForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE : List[str] = NezhaForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : str = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self :List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase = True
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Tuple=False ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
return inputs_dict
def __lowerCAmelCase ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = NezhaModelTester(self )
SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :List[str] ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
SCREAMING_SNAKE_CASE : List[str] = None
self.model_tester.create_and_check_model_as_decoder(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
def __lowerCAmelCase ( self :Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[int] = NezhaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@slow
@require_torch_gpu
def __lowerCAmelCase ( self :List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.jit.trace(
lowerCamelCase_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , '''bert.pt''' ) )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.jit.load(os.path.join(lowerCamelCase_ , '''bert.pt''' ) , map_location=lowerCamelCase_ )
loaded(inputs_dict['''input_ids'''].to(lowerCamelCase_ ) , inputs_dict['''attention_mask'''].to(lowerCamelCase_ ) )
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, 6, 7_68) )
self.assertEqual(output.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 6, 2_11_28) )
self.assertEqual(output.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1E-4 ) )
| 18 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowerCamelCase__ : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCamelCase__ : str = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowerCamelCase__ : int = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Union[str, Any]="binary" , lowerCamelCase_ :Dict=None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = fa_score(
lowerCamelCase_ , lowerCamelCase_ , labels=lowerCamelCase_ , pos_label=lowerCamelCase_ , average=lowerCamelCase_ , sample_weight=lowerCamelCase_ )
return {"f1": float(lowerCamelCase_ ) if score.size == 1 else score}
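# Added usage sketch: the wrapper above ultimately delegates to scikit-learn, so
# the docstring's first two examples can be reproduced directly with
# `sklearn.metrics.f1_score` (the un-mangled name of the `fa_score` import),
# without the `datasets` loading machinery.
from sklearn.metrics import f1_score

refs = [0, 1, 0, 1, 0]
preds = [0, 0, 1, 1, 0]
print(f1_score(refs, preds))                         # 0.5, matching Example 1 above
print(round(f1_score(refs, preds, pos_label=0), 2))  # 0.67, matching Example 2 above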
| 18 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 18 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def __A ( a_ : int , a_ : int )-> bool:
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __A ( a_ : int )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : List[str] = 11
SCREAMING_SNAKE_CASE : Union[str, Any] = int('''1''' + '''0''' * digit_len )
for num in range(a_ , a_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(a_ , a_ ):
solutions.append(F"{num}/{den}" )
den += 1
num += 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10
return solutions
def __A ( a_ : int = 2 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 1.0
for fraction in fraction_list(a_ ):
SCREAMING_SNAKE_CASE : List[str] = Fraction(a_ )
result *= frac.denominator / frac.numerator
return int(a_ )
if __name__ == "__main__":
print(solution())
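# Added reference: a de-mangled version of the digit-cancelling-fraction search
# (Project Euler 33). Note the mangled loop header above collapses the intended
# bounds into `range(a_ , a_ )`; the assumed intent for the 2-digit case is to
# scan all proper two-digit fractions, as done explicitly here.
from fractions import Fraction

def is_digit_cancelling_sketch(num: int, den: int) -> bool:
    # num/den "cancels" when striking the shared digit (units of num == tens of
    # den) leaves an equal fraction, e.g. 49/98 -> 4/8.
    return num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den

def curious_fractions() -> list:
    solutions = []
    for num in range(10, 100):
        for den in range(num + 1, 100):       # num < den keeps the fraction proper
            if den % 10 != 0 and num % 10 == den // 10 and is_digit_cancelling_sketch(num, den):
                solutions.append(f"{num}/{den}")
    return solutions

def denominator_of_product() -> int:
    product = Fraction(1, 1)
    for frac in curious_fractions():
        product *= Fraction(frac)
    return product.denominator                # denominator in lowest terms

print(curious_fractions())        # ['16/64', '19/95', '26/65', '49/98']
print(denominator_of_product())   # 100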
| 18 | 1 |
"""simple docstring"""
import re
def __A ( a_ : str )-> list:
'''simple docstring'''
return [char.split() for char in re.split(r'''[^ a-z A-Z 0-9 \s]''' , str_ )]
def __A ( a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = split_input(str_ )
return "".join(
[''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def __A ( a_ : str , a_ : bool , a_ : str )-> str:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE : Any = split_input(a_ )
if upper:
SCREAMING_SNAKE_CASE : Any = ''''''.join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
SCREAMING_SNAKE_CASE : int = ''''''.join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def __A ( a_ : str )-> str:
'''simple docstring'''
return to_simple_case(a_ )
def __A ( a_ : str )-> str:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE : Tuple = to_simple_case(a_ )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def __A ( a_ : str , a_ : bool )-> str:
'''simple docstring'''
return to_complex_case(a_ , a_ , '''_''' )
def __A ( a_ : str , a_ : bool )-> str:
'''simple docstring'''
return to_complex_case(a_ , a_ , '''-''' )
if __name__ == "__main__":
__import__("doctest").testmod()
| 18 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : int = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """maskformer-swin"""
UpperCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase_ :List[Any]=2_24 , lowerCamelCase_ :Tuple=4 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :List[str]=96 , lowerCamelCase_ :int=[2, 2, 6, 2] , lowerCamelCase_ :Union[str, Any]=[3, 6, 12, 24] , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=4.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=0.0 , lowerCamelCase_ :Any=0.0 , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :Any=1E-5 , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :List[str]=None , **lowerCamelCase_ :Union[str, Any] , ) -> Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim
SCREAMING_SNAKE_CASE : List[Any] = depths
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = num_heads
SCREAMING_SNAKE_CASE : Any = window_size
SCREAMING_SNAKE_CASE : List[str] = mlp_ratio
SCREAMING_SNAKE_CASE : str = qkv_bias
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = drop_path_rate
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Any = use_absolute_embeddings
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : int = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
SCREAMING_SNAKE_CASE : Dict = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
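# Added worked example of the derived attributes at the end of __init__ above,
# computed standalone so the arithmetic is visible: with the defaults
# embed_dim=96 and depths=[2, 2, 6, 2], the channel width doubles per stage.
embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))   # 96 * 8 = 768
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
assert hidden_size == 768
assert stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]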
| 18 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :str , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :List[Any] ) -> None:
'''simple docstring'''
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
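# Added sketch of the deprecation-shim pattern the class above implements: the
# old name subclasses its replacement and only adds a FutureWarning on
# construction. Class names here are illustrative, not part of transformers.
import warnings

class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn("OldProcessor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)

proc = OldProcessor(size=512)   # emits a FutureWarning, otherwise behaves like NewProcessor
assert proc.size == 512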
| 18 |
"""simple docstring"""
import math
class lowercase__:
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCamelCase_ :List[str]=0 ) -> List[Any]: # a graph with Node 0,1,...,N-1
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = n
SCREAMING_SNAKE_CASE : List[Any] = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # adjacency matrix for weight
SCREAMING_SNAKE_CASE : Any = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # dp[i][j] stores minimum distance from i to j
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = w
def __lowerCAmelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
SCREAMING_SNAKE_CASE : List[str] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return self.dp[u][v]
if __name__ == "__main__":
lowerCamelCase__ : Dict = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
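# Added extension sketch: the dp table above yields distances only. Tracking a
# successor matrix alongside the relaxation recovers the actual shortest paths.
# Names (`dist`, `nxt`) are assumptions, not part of the class above; the edge
# list reuses the example graph from the __main__ block.
import math

def floyd_warshall_with_paths(n: int, edges: list):
    dist = [[math.inf] * n for _ in range(n)]
    nxt = [[None] * n for _ in range(n)]
    for i in range(n):
        dist[i][i] = 0
        nxt[i][i] = i
    for u, v, w in edges:
        dist[u][v] = w
        nxt[u][v] = v
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
                    nxt[i][j] = nxt[i][k]   # first hop now routes through k
    return dist, nxt

def reconstruct(nxt, u: int, v: int) -> list:
    if nxt[u][v] is None:
        return []          # v unreachable from u
    path = [u]
    while u != v:
        u = nxt[u][v]
        path.append(u)
    return path

edges = [(0, 2, 9), (0, 4, 10), (1, 3, 5), (2, 3, 7), (3, 0, 10), (3, 1, 2),
         (3, 2, 1), (3, 4, 6), (4, 1, 3), (4, 2, 4), (4, 3, 9)]
dist, nxt = floyd_warshall_with_paths(5, edges)
print(dist[1][4], reconstruct(nxt, 1, 4))   # 11 [1, 3, 4]
print(dist[0][3], reconstruct(nxt, 0, 3))   # 16 [0, 2, 3]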
| 18 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
UpperCamelCase = Features({"""text""": Value("""string""" )} )
UpperCamelCase = Features({} )
UpperCamelCase = "text"
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text"}
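# Added standalone sketch of the pattern above: a frozen task template declares a
# canonical column name and exposes a mapping property so a dataset's columns can
# be renamed/cast into the expected layout. The real template also carries
# `Features` schemas, omitted here for brevity.
from dataclasses import dataclass, field

@dataclass(frozen=True)
class LanguageModelingTemplate:
    task: str = field(default="language-modeling")
    text_column: str = "text"

    @property
    def column_mapping(self) -> dict:
        # Map the dataset's column name onto the canonical "text" field.
        return {self.text_column: "text"}

template = LanguageModelingTemplate(text_column="content")
assert template.column_mapping == {"content": "text"}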
| 18 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Tuple = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
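# Added sketch: the `_LazyModule` indirection above defers heavy imports until an
# attribute is first touched. A minimal standalone version of the same idea using
# PEP 562 module-level __getattr__ (the real _LazyModule is considerably more
# involved); `json` stands in for a heavyweight submodule here.
import importlib
from types import ModuleType

_IMPORT_STRUCTURE = {"json": ["dumps", "loads"]}   # submodule -> exported names

def lazy_getattr(name: str):
    for module_name, exported in _IMPORT_STRUCTURE.items():
        if name in exported:
            module: ModuleType = importlib.import_module(module_name)  # imported on first access
            return getattr(module, name)
    raise AttributeError(name)

# Placed in a package __init__ as `__getattr__ = lazy_getattr`, a statement like
# `from pkg import dumps` would only import the submodule at first access.
assert lazy_getattr("dumps")({"a": 1}) == '{"a": 1}'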
| 18 | 1 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """vision-encoder-decoder"""
UpperCamelCase = True
def __init__( self :List[Any] , **lowerCamelCase_ :Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"A configuraton of type {self.model_type} cannot be instantiated because "
f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('''encoder''' )
SCREAMING_SNAKE_CASE : Optional[int] = encoder_config.pop('''model_type''' )
SCREAMING_SNAKE_CASE : Any = kwargs.pop('''decoder''' )
SCREAMING_SNAKE_CASE : List[Any] = decoder_config.pop('''model_type''' )
SCREAMING_SNAKE_CASE : Dict = AutoConfig.for_model(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.for_model(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = True
@classmethod
def __lowerCAmelCase ( cls :int , lowerCamelCase_ :PretrainedConfig , lowerCamelCase_ :PretrainedConfig , **lowerCamelCase_ :List[str] ) -> PretrainedConfig:
'''simple docstring'''
logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Dict = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : int = self.encoder.to_dict()
SCREAMING_SNAKE_CASE : Tuple = self.decoder.to_dict()
SCREAMING_SNAKE_CASE : List[Any] = self.__class__.model_type
return output
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.11""" )
@property
def __lowerCAmelCase ( self :Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> float:
'''simple docstring'''
return 1E-4
@property
def __lowerCAmelCase ( self :str ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = OrderedDict()
SCREAMING_SNAKE_CASE : Any = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''encoder_sequence'''}
return common_inputs
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :"PreTrainedTokenizerBase" , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional["TensorType"] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
import torch
SCREAMING_SNAKE_CASE : Dict = OrderedDict()
SCREAMING_SNAKE_CASE : Any = super().generate_dummy_inputs(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = dummy_input['''input_ids'''].shape
SCREAMING_SNAKE_CASE : str = (batch, encoder_sequence, self._config.encoder_hidden_size)
SCREAMING_SNAKE_CASE : Optional[Any] = dummy_input.pop('''input_ids''' )
SCREAMING_SNAKE_CASE : str = dummy_input.pop('''attention_mask''' )
SCREAMING_SNAKE_CASE : List[str] = torch.zeros(lowerCamelCase_ )
return common_inputs
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Tuple ) -> None:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :PretrainedConfig ) -> OnnxConfig:
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :PretrainedConfig , lowerCamelCase_ :PretrainedConfig , lowerCamelCase_ :str = "default" ) -> OnnxConfig:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(lowerCamelCase_ , lowerCamelCase_ )
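# Added usage sketch for the composite config above, written with the upstream
# `transformers` names rather than the mangled aliases in this file. Requires
# transformers to be installed; no weights are downloaded (configs only).
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

encoder_config = ViTConfig()
decoder_config = BertConfig()
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
# The classmethod flips the decoder into cross-attention mode, as logged above.
assert config.decoder.is_decoder and config.decoder.add_cross_attention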
| 18 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = nn.ModuleList(lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Union[torch.Tensor, float, int] , lowerCamelCase_ :torch.Tensor , lowerCamelCase_ :List[torch.tensor] , lowerCamelCase_ :List[float] , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[Dict[str, Any]] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = True , ) -> Union[ControlNetOutput, Tuple]:
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ , self.nets ) ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = controlnet(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase_ , lowerCamelCase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Union[str, os.PathLike] , lowerCamelCase_ :bool = True , lowerCamelCase_ :Callable = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[str] = None , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Any = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase_ , is_main_process=lowerCamelCase_ , save_function=lowerCamelCase_ , safe_serialization=lowerCamelCase_ , variant=lowerCamelCase_ , )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = model_path_to_save + f"_{idx}"
@classmethod
def __lowerCAmelCase ( cls :Dict , lowerCamelCase_ :Optional[Union[str, os.PathLike]] , **lowerCamelCase_ :Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Dict = pretrained_model_path
while os.path.isdir(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = ControlNetModel.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
controlnets.append(lowerCamelCase_ )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + f"_{idx}"
logger.info(f"{len(lowerCamelCase_ )} controlnets loaded from {pretrained_model_path}." )
if len(lowerCamelCase_ ) == 0:
raise ValueError(
f"No ControlNets found under {os.path.dirname(lowerCamelCase_ )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(lowerCamelCase_ )
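# Added tensor-level sketch of the residual merge the forward pass above performs:
# each ControlNet contributes per-block residuals that are summed. One assumption
# to flag: the conditioning scales are folded in during the merge here for
# illustration, whereas the class above passes them into each net's call instead.
import torch

def merge_controlnet_residuals(per_net_down: list, per_net_mid: list, scales: list):
    down_total, mid_total = None, None
    for down_samples, mid_sample, scale in zip(per_net_down, per_net_mid, scales):
        scaled_down = [d * scale for d in down_samples]
        scaled_mid = mid_sample * scale
        if down_total is None:
            down_total, mid_total = scaled_down, scaled_mid
        else:
            down_total = [a + b for a, b in zip(down_total, scaled_down)]
            mid_total = mid_total + scaled_mid
    return down_total, mid_total

down, mid = merge_controlnet_residuals(
    per_net_down=[[torch.ones(1, 4)], [torch.ones(1, 4)]],
    per_net_mid=[torch.ones(1, 2), torch.ones(1, 2)],
    scales=[1.0, 0.5],
)
assert torch.allclose(down[0], torch.full((1, 4), 1.5))
assert torch.allclose(mid, torch.full((1, 2), 1.5))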
| 18 | 1 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = (UnCLIPScheduler,)
def __lowerCAmelCase ( self :str , **lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {
'''num_train_timesteps''': 10_00,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**lowerCamelCase_ )
return config
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowerCamelCase_ )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowerCamelCase_ , prev_timestep=lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config(variance_type='''fixed_small_log''' )
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**lowerCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_5_4_9_6_2_5 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.9_9_9_4_9_8_7 ) ) < 1E-5
def __lowerCAmelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config(variance_type='''learned_range''' )
SCREAMING_SNAKE_CASE : Dict = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowerCamelCase_ ) - -1_0.1_7_1_2_7_9_0 < 1E-5
assert scheduler._get_variance(4_87 , predicted_variance=lowerCamelCase_ ) - -5.7_9_9_8_0_5_2 < 1E-5
assert scheduler._get_variance(9_99 , predicted_variance=lowerCamelCase_ ) - -0.0_0_1_0_0_1_1 < 1E-5
def __lowerCAmelCase ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = scheduler.timesteps
SCREAMING_SNAKE_CASE : str = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
for i, t in enumerate(lowerCamelCase_ ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE : Tuple = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : int = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1E-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1E-3
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Dict = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(25 )
SCREAMING_SNAKE_CASE : Any = scheduler.timesteps
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter
SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
for i, t in enumerate(lowerCamelCase_ ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , lowerCamelCase_ )
if i + 1 == timesteps.shape[0]:
SCREAMING_SNAKE_CASE : int = None
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE : List[Any] = scheduler.step(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , prev_timestep=lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Any = pred_prev_sample
SCREAMING_SNAKE_CASE : Tuple = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1E-3
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
pass
| 18 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __A ( a_ : float , a_ : float , a_ : bool = False )-> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(a_ ), magnitude * sin(a_ )]
return [magnitude * cos(radians(a_ ) ), magnitude * sin(radians(a_ ) )]
def __A ( a_ : NDArray[floataa] , a_ : NDArray[floataa] , a_ : float = 10**-1 )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : NDArray[floataa] = cross(a_ , a_ )
SCREAMING_SNAKE_CASE : float = sum(a_ )
return abs(a_ ) < eps
if __name__ == "__main__":
# Test to check if it works
lowerCamelCase__ : Optional[Any] = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCamelCase__ : Union[str, Any] = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
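# Added worked sketch of the equilibrium check above, using the last test case
# from this file: the 2-D cross product of each location with its force gives a
# scalar moment about the origin, and equilibrium holds when the moments (and,
# for a complete check, the net force) sum to ~zero.
import numpy as np

forces = np.array([[0.0, -2000.0], [0.0, -1200.0], [0.0, 15600.0], [0.0, -12400.0]])
locations = np.array([[0.0, 0.0], [6.0, 0.0], [10.0, 0.0], [12.0, 0.0]])

net_force = forces.sum(axis=0)
moments = np.cross(locations, forces)   # one scalar moment per applied force
net_moment = moments.sum()
print(net_force, net_moment)            # [0. 0.] 0.0
assert np.allclose(net_force, 0) and abs(net_moment) < 1e-9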
| 18 | 1 |
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = (DDPMParallelScheduler,)
def __lowerCAmelCase ( self :int , **lowerCamelCase_ :Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowerCamelCase_ )
return config
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCamelCase_ , beta_end=lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase_ )
def __lowerCAmelCase ( self :Dict ) -> str:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCamelCase_ )
def __lowerCAmelCase ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.check_over_configs(thresholding=lowerCamelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCamelCase_ , prediction_type=lowerCamelCase_ , sample_max_value=lowerCamelCase_ , )
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def __lowerCAmelCase ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**lowerCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1E-5
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model()
SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter
SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE : List[str] = samplea.shape[0]
SCREAMING_SNAKE_CASE : Optional[int] = torch.stack([samplea, samplea, samplea] , dim=0 )
SCREAMING_SNAKE_CASE : List[str] = torch.arange(lowerCamelCase_ )[0:3, None].repeat(1 , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE : List[str] = scheduler.batch_step_no_noise(lowerCamelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1E-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1E-3
def __lowerCAmelCase ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Dict = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.dummy_model()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_sample_deter
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(lowerCamelCase_ ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Tuple = pred_prev_sample
SCREAMING_SNAKE_CASE : Dict = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def __lowerCAmelCase ( self :List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_sample_deter
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
for t in reversed(range(lowerCamelCase_ ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE : Tuple = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[int] = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = scheduler.timesteps
for i, timestep in enumerate(lowerCamelCase_ ):
if i == len(lowerCamelCase_ ) - 1:
SCREAMING_SNAKE_CASE : List[str] = -1
else:
SCREAMING_SNAKE_CASE : int = timesteps[i + 1]
SCREAMING_SNAKE_CASE : List[Any] = scheduler.previous_timestep(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = prev_t.item()
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = [1_00, 87, 50, 51, 0]
with self.assertRaises(lowerCamelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = [1_00, 87, 50, 1, 0]
SCREAMING_SNAKE_CASE : Optional[int] = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCamelCase_ , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
| 18 |
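# The three variance assertions in the test above follow from the standard DDPM
# posterior variance used for the "fixed_small" setting. A self-contained numpy
# sketch of the formula (reconstructed from the DDPM paper, not copied from the
# library internals): variance_t = beta_t * (1 - alphabar_{t-1}) / (1 - alphabar_t).
import numpy as np

betas = np.linspace(0.0001, 0.02, 1000)       # linear schedule from the config
alphas_cumprod = np.cumprod(1.0 - betas)

def fixed_small_variance(t: int) -> float:
    alphabar_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    return betas[t] * (1.0 - alphabar_prev) / (1.0 - alphas_cumprod[t])

assert abs(fixed_small_variance(0) - 0.0) < 1e-5
assert abs(fixed_small_variance(487) - 0.00979) < 1e-5
assert abs(fixed_small_variance(999) - 0.02) < 1e-5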
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. A bigger population can be faster but uses more memory.
lowerCamelCase__ : Optional[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __A ( a_ : str , a_ : str )-> tuple[str, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = len([g for position, g in enumerate(a_ ) if g == main_target[position]] )
return (item, float(a_ ))
def __A ( a_ : str , a_ : str )-> tuple[str, str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = random.randint(0 , len(a_ ) - 1 )
SCREAMING_SNAKE_CASE : str = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __A ( a_ : str , a_ : list[str] )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(a_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE : Any = random.choice(a_ )
return "".join(a_ )
def __A ( a_ : tuple[str, float] , a_ : list[tuple[str, float]] , a_ : list[str] , )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE : List[str] = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10 if child_n >= 10 else child_n
for _ in range(a_ ):
SCREAMING_SNAKE_CASE : List[str] = population_score[random.randint(0 , a_ )][0]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = crossover(parent_a[0] , a_ )
# Append new string to the population list.
pop.append(mutate(a_ , a_ ) )
pop.append(mutate(a_ , a_ ) )
return pop
def __A ( a_ : str , a_ : list[str] , a_ : bool = True )-> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE : List[Any] = F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(a_ )
# Verify that the target contains no genes besides the ones inside genes variable.
SCREAMING_SNAKE_CASE : List[str] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE : str = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(a_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE : Tuple = []
for _ in range(a_ ):
population.append(''''''.join([random.choice(a_ ) for i in range(len(a_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(a_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE : int = [evaluate(a_ , a_ ) for item in population]
# Check if there is a matching evolution.
        SCREAMING_SNAKE_CASE : List[Any] = sorted(a_ , key=lambda x : x[1] , reverse=a_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
SCREAMING_SNAKE_CASE : Optional[Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(a_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE : Optional[int] = [
(item, score / len(a_ )) for item, score in population_score
]
# This is selection
for i in range(a_ ):
population.extend(select(population_score[int(a_ )] , a_ , a_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(a_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ : Dict = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ : int = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 18 | 1 |
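# A quick, deterministic illustration of the two core operators of the genetic
# algorithm above (fitness evaluation and one-point crossover), reimplemented
# here so the snippet runs on its own:
import random as _random

_random.seed(0)

def _evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Fitness = number of positions where the candidate matches the target.
    score = len([g for pos, g in enumerate(item) if g == main_target[pos]])
    return (item, float(score))

def _crossover(parent_a: str, parent_b: str) -> tuple[str, str]:
    # Swap the tails of the two parents at a random cut point.
    cut = _random.randint(0, len(parent_a) - 1)
    return (parent_a[:cut] + parent_b[cut:], parent_b[:cut] + parent_a[cut:])

assert _evaluate("Helxo", "Hello") == ("Helxo", 4.0)
child_a, child_b = _crossover("AAAA", "BBBB")
assert sorted(child_a + child_b) == sorted("AAAA" + "BBBB")  # genes conserved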
"""simple docstring"""
import os
import sys
lowerCamelCase__ : List[Any] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase__ : str = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __A ( *a_ : Any , **a_ : Union[str, Any] )-> Dict:
'''simple docstring'''
return AutoConfig.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __A ( *a_ : str , **a_ : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModel.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> Dict:
'''simple docstring'''
return AutoModel.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __A ( *a_ : Any , **a_ : Tuple )-> Dict:
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __A ( *a_ : Dict , **a_ : Optional[Any] )-> Optional[int]:
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __A ( *a_ : Optional[int] , **a_ : str )-> Optional[int]:
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> List[Any]:
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*a_ , **a_ )
| 18 |
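# Usage sketch for the thin wrappers above: each one simply forwards to the
# matching Auto* class's from_pretrained while re-exporting its docstring via
# add_start_docstrings. The checkpoint name below is illustrative; any Hub
# checkpoint works (and the call downloads weights on first use):
from transformers import AutoConfig, AutoModelForMaskedLM, AutoTokenizer

config = AutoConfig.from_pretrained("bert-base-uncased")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")
print(type(config).__name__, type(tokenizer).__name__, type(model).__name__)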
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] )-> Dict:
'''simple docstring'''
return max(metric_fn(a_ , a_ ) for gt in ground_truths )
def __A ( a_ : List[Any] , a_ : Union[str, Any] , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Optional[Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(a_ , sep='''\t''' , header=a_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : str = ast.literal_eval(a_ )
answers.append(a_ )
else:
SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(a_ , a_ ):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_ )
fa += metric_max_over_ground_truths(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = 100.0 * em / total
SCREAMING_SNAKE_CASE : Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __A ( a_ : Any , a_ : Any , a_ : List[Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = args.k
SCREAMING_SNAKE_CASE : Tuple = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = 0
for hypo, reference in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __A ( a_ : Any , a_ : List[str] , a_ : str )-> int:
'''simple docstring'''
def strip_title(a_ : Optional[Any] ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE : Tuple = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE : Any = rag_model.rag.question_encoder(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever(
a_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Dict = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : List[Any] = [strip_title(a_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_ ) )
return provenance_strings
def __A ( a_ : List[Any] , a_ : int , a_ : str )-> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : Dict = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Any = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Tuple = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
if args.print_predictions:
for q, a in zip(a_ , a_ ):
logger.info('''Q: {} - A: {}'''.format(a_ , a_ ) )
return answers
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
            '''RAG model type: rag_sequence, rag_token or bart; if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file. '''
            '''qa - a single line in the following format: question [tab] answer_list. '''
            '''ans - a single line of the gold file contains the expected answer string.'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_ )
SCREAMING_SNAKE_CASE : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(a_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(a_ , **a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a_ , retriever=a_ , **a_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(a_ , **a_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE : Dict = []
for line in tqdm(a_ ):
questions.append(line.strip() )
if len(a_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) )
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = get_args()
main(args)
| 18 | 1 |
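# The e2e scoring path above reduces each prediction to "best metric over all
# gold answers". A minimal sketch with a SQuAD-style exact-match metric (the
# real utils_rag helpers are assumed to normalize case, punctuation and
# articles in the same way):
import re
import string

def _normalize(text: str) -> str:
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())

def exact_match(prediction: str, ground_truth: str) -> int:
    return int(_normalize(prediction) == _normalize(ground_truth))

def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)

assert metric_max_over_ground_truths(
    exact_match, "The Eiffel Tower", ["eiffel tower", "paris"]
) == 1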
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 1
@register_to_config
def __init__( self :Dict , lowerCamelCase_ :Optional[Any]=20_00 , lowerCamelCase_ :List[str]=0.1 , lowerCamelCase_ :Dict=20 , lowerCamelCase_ :Union[str, Any]=1E-3 ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[str] = None
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, torch.device] = None ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = torch.linspace(1 , self.config.sampling_eps , lowerCamelCase_ , device=lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple=None ) -> List[Any]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
SCREAMING_SNAKE_CASE : Optional[int] = (
-0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
SCREAMING_SNAKE_CASE : Dict = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = std.flatten()
while len(std.shape ) < len(score.shape ):
SCREAMING_SNAKE_CASE : Optional[Any] = std.unsqueeze(-1 )
SCREAMING_SNAKE_CASE : Optional[Any] = -score / std
# compute
SCREAMING_SNAKE_CASE : Union[str, Any] = -1.0 / len(self.timesteps )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
SCREAMING_SNAKE_CASE : List[str] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
SCREAMING_SNAKE_CASE : Optional[int] = beta_t.unsqueeze(-1 )
SCREAMING_SNAKE_CASE : int = -0.5 * beta_t * x
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sqrt(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = drift - diffusion**2 * score
SCREAMING_SNAKE_CASE : Optional[Any] = x + drift * dt
# add noise
SCREAMING_SNAKE_CASE : str = randn_tensor(x.shape , layout=x.layout , generator=lowerCamelCase_ , device=x.device , dtype=x.dtype )
SCREAMING_SNAKE_CASE : Dict = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self :Dict ) -> Any:
'''simple docstring'''
return self.config.num_train_timesteps
| 18 |
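# The log_mean_coeff/std computation in the step function above matches the
# VP-SDE marginal from Song et al. (2021): x_t ~ N(exp(m(t)) * x_0,
# (1 - exp(2 m(t))) I) with m(t) = -t^2 (beta_max - beta_min) / 4 - t beta_min / 2.
# A numpy sketch using the defaults above (beta_min=0.1, beta_max=20,
# sampling_eps=1e-3):
import numpy as np

def vp_sde_std(t: np.ndarray, beta_min: float = 0.1, beta_max: float = 20.0) -> np.ndarray:
    log_mean_coeff = -0.25 * t**2 * (beta_max - beta_min) - 0.5 * t * beta_min
    return np.sqrt(1.0 - np.exp(2.0 * log_mean_coeff))

t = np.linspace(1.0, 1e-3, 5)   # mirrors set_timesteps from 1 down to sampling_eps
print(vp_sde_std(t))            # std is ~1 at t=1 and shrinks toward 0 as t -> 0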
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {"vocab_file": "vocab.json"}
lowerCamelCase__ : Dict = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCamelCase__ : Optional[Any] = {"mgp-str": 27}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[int]="[GO]" , lowerCamelCase_ :int="[GO]" , lowerCamelCase_ :str="[s]" , lowerCamelCase_ :Dict="[GO]" , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
super().__init__(
unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : int = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in self.vocab.items()}
@property
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
return len(self.vocab )
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for s in text:
            char_tokens.extend(s )
return char_tokens
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCamelCase_ ) )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' )
return (vocab_file,)
| 18 | 1 |
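# The tokenizer above is purely character-level: _tokenize splits the input
# into single characters and each one is looked up in vocab.json, with "[GO]"
# doubling as the unknown token. A dependency-free sketch of that behaviour
# (the vocab contents here are illustrative, not the real MGP-STR vocab):
vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}

def char_tokenize(text: str) -> list[str]:
    tokens: list[str] = []
    for s in text:
        tokens.append(s)  # one token per character
    return tokens

def convert_tokens_to_ids(tokens: list[str]) -> list[int]:
    return [vocab.get(t, vocab["[GO]"]) for t in tokens]

assert convert_tokens_to_ids(char_tokenize("abc")) == [2, 3, 4]
assert convert_tokens_to_ids(char_tokenize("a?")) == [2, 0]  # unknown -> [GO]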
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Tuple=7 , lowerCamelCase_ :Union[str, Any]=3 , lowerCamelCase_ :Optional[int]=18 , lowerCamelCase_ :List[Any]=30 , lowerCamelCase_ :Tuple=4_00 , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :Dict=None , lowerCamelCase_ :int=True , lowerCamelCase_ :Optional[int]=None , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :Dict=[0.5, 0.5, 0.5] , lowerCamelCase_ :str=[0.5, 0.5, 0.5] , lowerCamelCase_ :Dict=False , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = size if size is not None else {'''height''': 20, '''width''': 20}
SCREAMING_SNAKE_CASE : Any = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[str] = image_size
SCREAMING_SNAKE_CASE : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE : str = max_resolution
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : Optional[int] = size
SCREAMING_SNAKE_CASE : Any = do_center_crop
SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size
SCREAMING_SNAKE_CASE : Dict = do_normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean
SCREAMING_SNAKE_CASE : int = image_std
SCREAMING_SNAKE_CASE : Optional[int] = do_reduce_labels
def __lowerCAmelCase ( self :int ) -> Tuple:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __A ( )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open(dataset[0]['''file'''] )
SCREAMING_SNAKE_CASE : Dict = Image.open(dataset[1]['''file'''] )
return image, map
def __A ( )-> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
SCREAMING_SNAKE_CASE : Tuple = Image.open(ds[0]['''file'''] )
SCREAMING_SNAKE_CASE : int = Image.open(ds[1]['''file'''] )
SCREAMING_SNAKE_CASE : Any = Image.open(ds[2]['''file'''] )
SCREAMING_SNAKE_CASE : List[str] = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = BeitImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = BeitImageProcessingTester(self )
@property
def __lowerCAmelCase ( self :int ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self :Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''image_std''' ) )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=lowerCamelCase_ )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Tuple:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :List[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = image_processing(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self :Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = image_processing(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self :Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = []
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched
SCREAMING_SNAKE_CASE : str = image_processing(lowerCamelCase_ , lowerCamelCase_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test not batched input (PIL images)
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(lowerCamelCase_ , lowerCamelCase_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched input (PIL images)
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = prepare_semantic_batch_inputs()
SCREAMING_SNAKE_CASE : List[str] = image_processing(lowerCamelCase_ , lowerCamelCase_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE : Any = image_processing(lowerCamelCase_ , lowerCamelCase_ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 1_50 )
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : List[Any] = image_processing(lowerCamelCase_ , lowerCamelCase_ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
| 18 |
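# The ADE20k tests above rely on the reduce_labels convention: class 0 is
# "background", so every label is shifted down by one and background becomes
# the 255 ignore index. A numpy sketch of that mapping (a reconstruction of
# the documented behaviour, not the processor internals):
import numpy as np

def reduce_labels(label_map: np.ndarray) -> np.ndarray:
    out = label_map.astype(np.int64)
    out[out == 0] = 255    # background -> ignore index
    out = out - 1          # shift the remaining classes down by one
    out[out == 254] = 255  # keep the ignore index stable after the shift
    return out

seg = np.array([[0, 1], [150, 2]])
assert (reduce_labels(seg) == np.array([[255, 0], [149, 1]])).all()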
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :Optional[int] , lowerCamelCase_ :Union[str, Any]=5_02_67 , lowerCamelCase_ :int=50_00_00 , lowerCamelCase_ :Tuple=7_68 , lowerCamelCase_ :List[str]=2_56 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :Optional[Any]=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :List[str]=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Tuple=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Any=0 , lowerCamelCase_ :str=2 , **lowerCamelCase_ :List[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[str] = entity_vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = entity_emb_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
| 18 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : Optional[Any] = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """mctct"""
def __init__( self :str , lowerCamelCase_ :Dict=80_65 , lowerCamelCase_ :Optional[Any]=15_36 , lowerCamelCase_ :int=36 , lowerCamelCase_ :List[str]=61_44 , lowerCamelCase_ :Optional[int]=4 , lowerCamelCase_ :Tuple=3_84 , lowerCamelCase_ :str=9_20 , lowerCamelCase_ :List[str]=1E-5 , lowerCamelCase_ :Optional[int]=0.3 , lowerCamelCase_ :str="relu" , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :str=0.3 , lowerCamelCase_ :Union[str, Any]=0.3 , lowerCamelCase_ :Optional[Any]=1 , lowerCamelCase_ :Union[str, Any]=0 , lowerCamelCase_ :str=2 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Union[str, Any]=0.3 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Optional[int]=(7,) , lowerCamelCase_ :List[str]=(3,) , lowerCamelCase_ :Dict=80 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Any=None , lowerCamelCase_ :str="sum" , lowerCamelCase_ :List[Any]=False , **lowerCamelCase_ :Tuple , ) -> Any:
'''simple docstring'''
super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = attention_head_dim
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE : str = layerdrop
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = pad_token_id
SCREAMING_SNAKE_CASE : Tuple = bos_token_id
SCREAMING_SNAKE_CASE : List[str] = eos_token_id
SCREAMING_SNAKE_CASE : int = conv_glu_dim
SCREAMING_SNAKE_CASE : str = conv_dropout
SCREAMING_SNAKE_CASE : Tuple = num_conv_layers
SCREAMING_SNAKE_CASE : int = input_feat_per_channel
SCREAMING_SNAKE_CASE : Optional[Any] = input_channels
SCREAMING_SNAKE_CASE : Any = conv_channels
SCREAMING_SNAKE_CASE : Tuple = ctc_loss_reduction
SCREAMING_SNAKE_CASE : Any = ctc_zero_infinity
# prevents config testing fail with exporting to json
SCREAMING_SNAKE_CASE : Dict = list(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = list(lowerCamelCase_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
f"but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, "
f"`config.num_conv_layers = {self.num_conv_layers}`." )
| 18 |
"""simple docstring"""
def __A ( a_ : list , a_ : int , a_ : int = 0 , a_ : int = 0 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = right or len(a_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a_ , a_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
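# A de-obfuscated, runnable version of the two-ended recursive linear search
# above: it compares the key against both ends of the current window and
# recurses inward. (Note the `right or len(...) - 1` default means an explicit
# right=0 is silently replaced by the last index.)
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
    if left > right:
        return -1
    if list_data[left] == key:
        return left
    if list_data[right] == key:
        return right
    return search(list_data, key, left + 1, right - 1)

assert search([1, 2, 4, 8, 16], 8) == 3
assert search([1, 2, 4, 8, 16], 5) == -1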
"""simple docstring"""
def __A ( a_ : int = 1 , a_ : int = 10_00 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 1
SCREAMING_SNAKE_CASE : List[str] = 0
for divide_by_number in range(a_ , digit + 1 ):
SCREAMING_SNAKE_CASE : list[int] = []
SCREAMING_SNAKE_CASE : str = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(a_ ):
SCREAMING_SNAKE_CASE : int = len(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = divide_by_number
else:
has_been_divided.append(a_ )
SCREAMING_SNAKE_CASE : List[str] = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
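# The loop above is the classic Project Euler 26 computation: for each
# denominator d up to the limit, simulate long division of 1/d and find the d
# whose decimal expansion has the longest recurring cycle. A compact variant
# that tracks remainders and measures the true cycle length:
def longest_cycle_denominator(limit: int) -> int:
    best_d, best_len = 1, 0
    for d in range(1, limit + 1):
        seen: dict[int, int] = {}
        remainder, pos = 1, 0
        while remainder != 0 and remainder not in seen:
            seen[remainder] = pos
            remainder = remainder * 10 % d
            pos += 1
        cycle = pos - seen[remainder] if remainder != 0 else 0
        if cycle > best_len:
            best_d, best_len = d, cycle
    return best_d

assert longest_cycle_denominator(10) == 7      # 1/7 = 0.(142857)
assert longest_cycle_denominator(1000) == 983  # Project Euler 26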
"""simple docstring"""
def __A ( a_ : int )-> list[int]:
'''simple docstring'''
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
SCREAMING_SNAKE_CASE : Optional[int] = [True] * (num + 1)
SCREAMING_SNAKE_CASE : Optional[Any] = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , a_ ):
SCREAMING_SNAKE_CASE : Any = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ : str = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 18 | 1 |
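# A de-obfuscated, standalone version of the sieve above with a quick sanity
# check:
def prime_sieve_eratosthenes(num: int) -> list[int]:
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Every multiple of p starting at p*p is composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [n for n in range(2, num + 1) if primes[n]]

assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]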
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=13_37 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def __A ( a_ : SplitDict )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = split_dict._to_yaml_list()
assert len(a_ ) == len(a_ )
SCREAMING_SNAKE_CASE : List[Any] = SplitDict._from_yaml_list(a_ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
SCREAMING_SNAKE_CASE : Dict = None
# the split name of split_dict takes over the name of the split info object
SCREAMING_SNAKE_CASE : List[str] = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=a_ ), SplitInfo(dataset_name='''my_dataset''' )] )
def __A ( a_ : Dict )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 18 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Optional[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 | 1 |
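# The file above follows the standard transformers lazy-import layout: names
# are declared in _import_structure and only resolved when first accessed. A
# minimal standalone sketch of the same idea using a module-level __getattr__
# (PEP 562); the module and attribute names below are illustrative:
import importlib

_IMPORT_STRUCTURE = {"json": ["dumps", "loads"]}  # submodule -> exported names

def __getattr__(name: str):
    for module_name, exported in _IMPORT_STRUCTURE.items():
        if name in exported:
            module = importlib.import_module(module_name)
            return getattr(module, name)  # import happens only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")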
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
lowerCamelCase__ : Any = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
lowerCamelCase__ : Tuple = {
"google/fnet-base": 512,
"google/fnet-large": 512,
}
lowerCamelCase__ : List[Any] = "▁"
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ["""input_ids""", """token_type_ids"""]
UpperCamelCase = FNetTokenizer
def __init__( self :List[str] , lowerCamelCase_ :str=None , lowerCamelCase_ :int=None , lowerCamelCase_ :int=False , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Optional[int]="<unk>" , lowerCamelCase_ :Dict="[SEP]" , lowerCamelCase_ :Union[str, Any]="<pad>" , lowerCamelCase_ :List[Any]="[CLS]" , lowerCamelCase_ :Any="[MASK]" , **lowerCamelCase_ :List[str] , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = (
AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ , normalized=lowerCamelCase_ )
if isinstance(lowerCamelCase_ , lowerCamelCase_ )
else mask_token
)
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : List[Any] = do_lower_case
SCREAMING_SNAKE_CASE : int = remove_space
SCREAMING_SNAKE_CASE : Dict = keep_accents
SCREAMING_SNAKE_CASE : int = vocab_file
SCREAMING_SNAKE_CASE : List[Any] = False if not self.vocab_file else True
def __lowerCAmelCase ( self :str , lowerCamelCase_ :List[int] , lowerCamelCase_ :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :List[int] , lowerCamelCase_ :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id]
SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE : Dict = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ):
copyfile(self.vocab_file , lowerCamelCase_ )
return (out_vocab_file,)
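# --- Illustrative sketch (added; not from the original file, ids 101/102 are invented) ---
# The two helpers above lay sequences out as `[CLS] A [SEP]` and `[CLS] A [SEP] B [SEP]`;
# this toy reproduction only demonstrates the layout and length bookkeeping.
def _demo_special_token_layout():
    cls_id, sep_id = 101, 102  # stand-ins for the real [CLS]/[SEP] ids
    token_ids_a, token_ids_b = [7, 8, 9], [4, 5]
    pair_ids = [cls_id] + token_ids_a + [sep_id] + token_ids_b + [sep_id]
    # token type ids: 0 for `[CLS] A [SEP]`, 1 for `B [SEP]`
    type_ids = [0] * (len(token_ids_a) + 2) + [1] * (len(token_ids_b) + 1)
    assert len(pair_ids) == len(type_ids) == 8
    return pair_ids, type_ids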
| 18 |
"""simple docstring"""
import os
import sys
lowerCamelCase__ : List[Any] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase__ : str = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __A ( *a_ : Any , **a_ : Union[str, Any] )-> Dict:
'''simple docstring'''
return AutoConfig.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __A ( *a_ : str , **a_ : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModel.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> Dict:
'''simple docstring'''
return AutoModel.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __A ( *a_ : Any , **a_ : Tuple )-> Dict:
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __A ( *a_ : Dict , **a_ : Optional[Any] )-> Optional[int]:
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __A ( *a_ : Optional[int] , **a_ : str )-> Optional[int]:
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> List[Any]:
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*a_ , **a_ )
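# --- Illustrative sketch (added; every name below is invented for the demo) ---
# Each wrapper above only forwards *args/**kwargs to an Auto class while borrowing
# its documentation via `add_start_docstrings`. A minimal local stand-in of that pattern:
def _with_docstring_of(target):
    def decorator(fn):
        fn.__doc__ = target.__doc__  # copy the wrapped object's docstring
        return fn
    return decorator
class _FakeAuto:
    """Documentation carried over to the thin wrapper."""
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        return (args, kwargs)
@_with_docstring_of(_FakeAuto)
def _fake_auto(*args, **kwargs):
    return _FakeAuto.from_pretrained(*args, **kwargs)
assert _fake_auto.__doc__ == _FakeAuto.__doc__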
| 18 | 1 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class lowercase__:
'''simple docstring'''
def __init__( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = psutil.Process()
SCREAMING_SNAKE_CASE : Any = False
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = -1
while True:
SCREAMING_SNAKE_CASE : List[str] = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def __lowerCAmelCase ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Union[str, Any] = threading.Thread(target=self.peak_monitor )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
self.thread.start()
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = False
self.thread.join()
return self.cpu_memory_peak
lowerCamelCase__ : List[Any] = PeakCPUMemory()
def __A ( )-> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {'''time''': time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
SCREAMING_SNAKE_CASE : str = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.memory_allocated(a_ )
torch.cuda.reset_peak_memory_stats()
return measures
def __A ( a_ : List[Any] )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {'''time''': time.time() - start_measures['''time''']}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
SCREAMING_SNAKE_CASE : Optional[Any] = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20
SCREAMING_SNAKE_CASE : Any = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
SCREAMING_SNAKE_CASE : str = (torch.cuda.memory_allocated(a_ ) - start_measures[str(a_ )]) / 2**20
SCREAMING_SNAKE_CASE : List[str] = (torch.cuda.max_memory_allocated(a_ ) - start_measures[str(a_ )]) / 2**20
return measures
def __A ( a_ : str , a_ : int )-> Optional[int]:
'''simple docstring'''
print(F"{description}:" )
print(F"- Time: {measures['time']:.2f}s" )
for i in range(torch.cuda.device_count() ):
print(F"- GPU {i} allocated: {measures[str(a_ )]:.2f}MiB" )
SCREAMING_SNAKE_CASE : Optional[int] = measures[F"{i}-peak"]
print(F"- GPU {i} peak: {peak:.2f}MiB" )
print(F"- CPU RAM allocated: {measures['cpu']:.2f}MiB" )
print(F"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB" )
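# --- Illustrative sketch (added; a toy stand-in, not the original monitor) ---
# How a peak monitor like `PeakCPUMemory` above is typically driven: start the
# sampling thread, run the workload, then read the peak back. This version
# samples an arbitrary callable instead of the process's resident set size.
class _TinyPeakTracker:
    def __init__(self, sample):
        self._sample = sample
        self._peak = 0
        self._running = False
    def _loop(self):
        # busy-loop on purpose, mirroring the monitor above: sleeping would miss short-lived peaks
        while self._running:
            self._peak = max(self._peak, self._sample())
    def start(self):
        self._running = True
        self._thread = threading.Thread(target=self._loop, daemon=True)
        self._thread.start()
    def stop(self):
        self._running = False
        self._thread.join()
        return self._peak
if __name__ == "__main__":
    state = {"value": 0}
    tracker = _TinyPeakTracker(lambda: state["value"])
    tracker.start()
    for i in range(10_000):
        state["value"] = i  # stands in for process RSS during a workload
    print("observed peak:", tracker.stop())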
| 18 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """encodec"""
def __init__( self :List[str] , lowerCamelCase_ :Tuple=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , lowerCamelCase_ :str=2_40_00 , lowerCamelCase_ :Any=1 , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :Optional[int]=None , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :str=1_28 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :int=1 , lowerCamelCase_ :Dict=[8, 5, 4, 2] , lowerCamelCase_ :List[Any]="weight_norm" , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=7 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Optional[int]="reflect" , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Dict=1.0 , lowerCamelCase_ :Any=10_24 , lowerCamelCase_ :str=None , lowerCamelCase_ :Union[str, Any]=True , **lowerCamelCase_ :Optional[int] , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = target_bandwidths
SCREAMING_SNAKE_CASE : List[str] = sampling_rate
SCREAMING_SNAKE_CASE : Tuple = audio_channels
SCREAMING_SNAKE_CASE : Tuple = normalize
SCREAMING_SNAKE_CASE : str = chunk_length_s
SCREAMING_SNAKE_CASE : List[str] = overlap
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_filters
SCREAMING_SNAKE_CASE : Tuple = num_residual_layers
SCREAMING_SNAKE_CASE : List[Any] = upsampling_ratios
SCREAMING_SNAKE_CASE : Optional[int] = norm_type
SCREAMING_SNAKE_CASE : Any = kernel_size
SCREAMING_SNAKE_CASE : Union[str, Any] = last_kernel_size
SCREAMING_SNAKE_CASE : Tuple = residual_kernel_size
SCREAMING_SNAKE_CASE : Any = dilation_growth_rate
SCREAMING_SNAKE_CASE : Optional[int] = use_causal_conv
SCREAMING_SNAKE_CASE : str = pad_mode
SCREAMING_SNAKE_CASE : List[Any] = compress
SCREAMING_SNAKE_CASE : Optional[Any] = num_lstm_layers
SCREAMING_SNAKE_CASE : Dict = trim_right_ratio
SCREAMING_SNAKE_CASE : List[Any] = codebook_size
SCREAMING_SNAKE_CASE : Union[str, Any] = codebook_dim if codebook_dim is not None else hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`, got {self.norm_type}" )
super().__init__(**lowerCamelCase_ )
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __lowerCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
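# --- Illustrative sketch (added; the inputs below are assumptions for the demo,
# since `chunk_length_s` and `overlap` default to None in the config itself) ---
# The derived properties above reduce to simple arithmetic, e.g. for a 24 kHz
# model with upsampling ratios (8, 5, 4, 2):
def _encodec_frame_math(sampling_rate=24_000, ratios=(8, 5, 4, 2), chunk_length_s=1.0, overlap=0.01):
    hop_length = int(np.prod(ratios))                   # 320 samples per frame
    frame_rate = math.ceil(sampling_rate / hop_length)  # 75 frames per second
    chunk_length = int(chunk_length_s * sampling_rate)  # 24_000 samples
    chunk_stride = max(1, int((1.0 - overlap) * chunk_length))
    return frame_rate, chunk_length, chunk_stride
assert _encodec_frame_math() == (75, 24_000, 23_760)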
| 18 | 1 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCamelCase__ : Tuple = "Text data.\n Second line of data."
lowerCamelCase__ : List[str] = "file"
@pytest.fixture(scope='''session''' )
def __A ( a_ : str )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
SCREAMING_SNAKE_CASE : Dict = bytes(a_ , '''utf-8''' )
with zstd.open(a_ , '''wb''' ) as f:
f.write(a_ )
return path
@pytest.fixture
def __A ( a_ : Optional[int] )-> Optional[int]:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , a_ ) , '''w''' ) as f:
f.write(a_ )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def __A ( a_ : str , a_ : Tuple , a_ : Dict , a_ : Optional[Any] , a_ : Tuple , a_ : Union[str, Any] )-> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
SCREAMING_SNAKE_CASE : Union[str, Any] = input_paths[compression_format]
SCREAMING_SNAKE_CASE : Tuple = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE : Any = DownloadConfig(cache_dir=a_ , extract_compressed_file=a_ )
SCREAMING_SNAKE_CASE : int = cached_path(a_ , download_config=a_ )
with open(a_ ) as f:
SCREAMING_SNAKE_CASE : Tuple = f.read()
with open(a_ ) as f:
SCREAMING_SNAKE_CASE : Optional[Any] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : List[Any] , a_ : Optional[int] , a_ : Tuple )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''custom_cache'''
SCREAMING_SNAKE_CASE : str = '''custom_extracted_dir'''
SCREAMING_SNAKE_CASE : Any = tmp_path / '''custom_extracted_path'''
if default_extracted:
SCREAMING_SNAKE_CASE : Optional[int] = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , a_ )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(a_ ) )
SCREAMING_SNAKE_CASE : str = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
SCREAMING_SNAKE_CASE : Union[str, Any] = xz_file
SCREAMING_SNAKE_CASE : str = (
DownloadConfig(extract_compressed_file=a_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=a_ )
)
SCREAMING_SNAKE_CASE : int = cached_path(a_ , download_config=a_ )
assert Path(a_ ).parent.parts[-2:] == expected
def __A ( a_ : Any )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = str(Path(a_ ).resolve() )
assert cached_path(a_ ) == text_file
# relative path
SCREAMING_SNAKE_CASE : Optional[int] = str(Path(a_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(a_ ) == text_file
def __A ( a_ : Optional[int] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(a_ ):
cached_path(a_ )
# relative path
SCREAMING_SNAKE_CASE : Union[str, Any] = '''./__missing_file__.txt'''
with pytest.raises(a_ ):
cached_path(a_ )
def __A ( a_ : Optional[int] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = get_from_cache(F"tmp://{tmpfs_file}" )
with open(a_ ) as f:
SCREAMING_SNAKE_CASE : Dict = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , a_ )
def __A ( )-> Dict:
'''simple docstring'''
with pytest.raises(a_ ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , a_ )
def __A ( a_ : Dict )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(a_ ):
http_get('''https://huggingface.co''' , temp_file=a_ )
with pytest.raises(a_ ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , a_ )
def __A ( a_ : List[str] )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(a_ ):
ftp_get('''ftp://huggingface.co''' , temp_file=a_ )
with pytest.raises(a_ ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , a_ )
def __A ( a_ : str )-> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(a_ ):
fsspec_get('''s3://huggingface.co''' , temp_file=a_ )
with pytest.raises(a_ ):
fsspec_head('''s3://huggingface.co''' )
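# --- Illustrative sketch (added; paths and payload are whatever the caller supplies) ---
# The zstd fixture above writes compressed bytes through zstandard's file-like
# `zstd.open` API (the same call the fixture uses); decompression is the mirror image.
def _zstd_roundtrip(path, payload: bytes) -> bytes:
    with zstd.open(path, "wb") as f:
        f.write(payload)
    with zstd.open(path, "rb") as f:
        return f.read()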
| 18 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :str=7 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :int=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Optional[Any]=99 , lowerCamelCase_ :Any=36 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :str=4 , lowerCamelCase_ :Tuple=37 , lowerCamelCase_ :Optional[int]="gelu" , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :int=6 , lowerCamelCase_ :str=6 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :Tuple=10_00 , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : Tuple = text_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Dict = use_input_mask
SCREAMING_SNAKE_CASE : Any = use_token_type_ids
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = coordinate_size
SCREAMING_SNAKE_CASE : List[Any] = shape_size
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE : List[str] = scope
SCREAMING_SNAKE_CASE : Optional[int] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE : str = text_seq_length
SCREAMING_SNAKE_CASE : int = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE : Optional[Any] = self.text_seq_length + self.image_seq_length
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE : str = bbox[i, j, 3]
SCREAMING_SNAKE_CASE : List[str] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE : Any = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 2]
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = t
SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# text + image
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , pixel_values=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : int = LayoutLMvaForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
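# --- Illustrative sketch (added; helper name is invented, values are the tester's defaults) ---
# The tester above sizes LayoutLMv3's sequence as text tokens plus image patches plus
# one CLS token; with its defaults (text_seq_length=7, image_size=4, patch_size=2)
# that gives 7 + (4 // 2) ** 2 + 1 = 12.
def _layoutlmva_seq_length(text_seq_length: int, image_size: int, patch_size: int) -> int:
    image_seq_length = (image_size // patch_size) ** 2 + 1
    return text_seq_length + image_seq_length
assert _layoutlmva_seq_length(7, 4, 2) == 12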
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
return True
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = LayoutLMvaModelTester(self )
SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[str] , lowerCamelCase_ :str=False ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(lowerCamelCase_ )
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCamelCase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCamelCase_ , )
return inputs_dict
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : str = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __A ( )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :str ) -> int:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase_ ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).pixel_values.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[1, 2]] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
SCREAMING_SNAKE_CASE : Tuple = model(
input_ids=input_ids.to(lowerCamelCase_ ) , bbox=bbox.to(lowerCamelCase_ ) , pixel_values=pixel_values.to(lowerCamelCase_ ) , )
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
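# --- Illustrative sketch (added; an alternative formulation, not the tester's own code) ---
# The nested loop in the tester above swaps coordinates so every random box satisfies
# x0 <= x1 and y0 <= y1; the same normalization can be written vectorized. Guarded on
# torch being importable, like the rest of this file.
if is_torch_available():
    def _normalize_bboxes(bbox):
        # bbox: (..., 4) tensor of (x0, y0, x1, y1) corners in any order
        xa = torch.minimum(bbox[..., 0], bbox[..., 2])
        ya = torch.minimum(bbox[..., 1], bbox[..., 3])
        xb = torch.maximum(bbox[..., 0], bbox[..., 2])
        yb = torch.maximum(bbox[..., 1], bbox[..., 3])
        return torch.stack([xa, ya, xb, yb], dim=-1)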
| 18 | 1 |
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "a",
11: "b",
12: "c",
13: "d",
14: "e",
15: "f",
}
def __A ( a_ : float )-> str:
'''simple docstring'''
assert type(a_ ) in (int, float) and decimal == int(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(a_ )
SCREAMING_SNAKE_CASE : Tuple = ''''''
SCREAMING_SNAKE_CASE : str = False
if decimal < 0:
SCREAMING_SNAKE_CASE : Optional[Any] = True
decimal *= -1
while decimal > 0:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = divmod(a_ , 16 )
SCREAMING_SNAKE_CASE : Tuple = values[remainder] + hexadecimal
SCREAMING_SNAKE_CASE : Any = '''0x''' + hexadecimal
if negative:
SCREAMING_SNAKE_CASE : Any = '''-''' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
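# --- Illustrative check (added; a self-contained mirror, not the function above) ---
# The same divmod loop, cross-checked against Python's built-in hex(). Note that the
# loop above is skipped entirely for an input of 0, so this mirror handles 0 explicitly.
def _to_hex(n: int) -> str:
    digits = "0123456789abcdef"
    if n == 0:
        return "0x0"
    sign, n = ("-", -n) if n < 0 else ("", n)
    body = ""
    while n > 0:
        n, r = divmod(n, 16)
        body = digits[r] + body
    return sign + "0x" + body
assert all(_to_hex(n) == hex(n) for n in (-255, -16, -1, 0, 1, 15, 16, 255, 4_096))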
| 18 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
lowerCamelCase__ : Any = logging.getLogger(__name__)
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
UpperCamelCase = field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
UpperCamelCase = field(
default=10_24 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the training data."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the validation data."""} )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the test data."""} )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
SCREAMING_SNAKE_CASE : Optional[int] = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
UpperCamelCase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def __A ( )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(a_ )
datasets.utils.logging.set_verbosity(a_ )
transformers.utils.logging.set_verbosity(a_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + F", distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
SCREAMING_SNAKE_CASE : Any = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
SCREAMING_SNAKE_CASE : List[Any] = data_args.train_file.split('''.''' )[-1]
SCREAMING_SNAKE_CASE : Optional[int] = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
SCREAMING_SNAKE_CASE : str = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
SCREAMING_SNAKE_CASE : int = load_dataset('''csv''' , data_files=a_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
SCREAMING_SNAKE_CASE : Tuple = load_dataset('''json''' , data_files=a_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
SCREAMING_SNAKE_CASE : str = raw_datasets['''train'''].features['''label'''].names
SCREAMING_SNAKE_CASE : Union[str, Any] = len(a_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
SCREAMING_SNAKE_CASE : Dict = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=a_ , )
SCREAMING_SNAKE_CASE : List[Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
SCREAMING_SNAKE_CASE : Optional[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
SCREAMING_SNAKE_CASE : Tuple = {'''Refused''': 0, '''Entailed''': 1}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
SCREAMING_SNAKE_CASE : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(a_ : str ):
# Tokenize the texts
def _convert_table_text_to_pandas(a_ : List[Any] ):
SCREAMING_SNAKE_CASE : List[Any] = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
SCREAMING_SNAKE_CASE : Dict = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
SCREAMING_SNAKE_CASE : List[Any] = examples['''statement''']
SCREAMING_SNAKE_CASE : Optional[int] = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
SCREAMING_SNAKE_CASE : Any = tokenizer(a_ , a_ , padding=a_ , max_length=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : List[Any] = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = raw_datasets.map(
a_ , batched=a_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : Any = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
SCREAMING_SNAKE_CASE : Tuple = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
SCREAMING_SNAKE_CASE : Optional[int] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(a_ ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping strings to floats.
def compute_metrics(a_ : EvalPrediction ):
SCREAMING_SNAKE_CASE : str = p.predictions[0] if isinstance(p.predictions , a_ ) else p.predictions
SCREAMING_SNAKE_CASE : Tuple = np.argmax(a_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = default_data_collator
elif training_args.fpaa:
SCREAMING_SNAKE_CASE : Union[str, Any] = DataCollatorWithPadding(a_ , pad_to_multiple_of=8 )
else:
SCREAMING_SNAKE_CASE : List[Any] = None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Optional[Any] = Trainer(
model=a_ , args=a_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=a_ , tokenizer=a_ , data_collator=a_ , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : List[str] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : str = last_checkpoint
SCREAMING_SNAKE_CASE : str = trainer.train(resume_from_checkpoint=a_ )
SCREAMING_SNAKE_CASE : Optional[int] = train_result.metrics
SCREAMING_SNAKE_CASE : int = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ )
)
SCREAMING_SNAKE_CASE : Optional[int] = min(a_ , len(a_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , a_ )
trainer.save_metrics('''train''' , a_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE : Tuple = trainer.evaluate(eval_dataset=a_ )
SCREAMING_SNAKE_CASE : str = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = min(a_ , len(a_ ) )
trainer.log_metrics('''eval''' , a_ )
trainer.save_metrics('''eval''' , a_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
SCREAMING_SNAKE_CASE : Optional[Any] = predict_dataset.remove_columns('''label''' )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.predict(a_ , metric_key_prefix='''predict''' ).predictions
SCREAMING_SNAKE_CASE : Union[str, Any] = np.argmax(a_ , axis=1 )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(a_ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(a_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = label_list[item]
writer.write(F"{index}\t{item}\n" )
SCREAMING_SNAKE_CASE : Optional[int] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**a_ )
else:
trainer.create_model_card(**a_ )
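# --- Illustrative sketch (added; the toy table below is invented for the demo) ---
# `_convert_table_text_to_pandas` above parses a '#'-delimited, newline-separated
# table string into a DataFrame. A minimal reproduction:
def _demo_convert_table_text():
    table_text = "col_a#col_b\n1#x\n2#y"
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])  # two rows, two columns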
def __A ( a_ : List[str] )-> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 18 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def __A ( a_ : Optional[int] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = SwinvaConfig()
SCREAMING_SNAKE_CASE : Tuple = swinva_name.split('''_''' )
SCREAMING_SNAKE_CASE : str = name_split[1]
if "to" in name_split[3]:
SCREAMING_SNAKE_CASE : Optional[Any] = int(name_split[3][-3:] )
else:
SCREAMING_SNAKE_CASE : int = int(name_split[3] )
if "to" in name_split[2]:
SCREAMING_SNAKE_CASE : Dict = int(name_split[2][-2:] )
else:
SCREAMING_SNAKE_CASE : Any = int(name_split[2][6:] )
if model_size == "tiny":
SCREAMING_SNAKE_CASE : Optional[Any] = 96
SCREAMING_SNAKE_CASE : str = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE : Any = (3, 6, 12, 24)
elif model_size == "small":
SCREAMING_SNAKE_CASE : List[str] = 96
SCREAMING_SNAKE_CASE : List[Any] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24)
elif model_size == "base":
SCREAMING_SNAKE_CASE : List[str] = 1_28
SCREAMING_SNAKE_CASE : int = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : Optional[Any] = (4, 8, 16, 32)
else:
SCREAMING_SNAKE_CASE : List[Any] = 1_92
SCREAMING_SNAKE_CASE : Optional[Any] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : Dict = (6, 12, 24, 48)
if "to" in swinva_name:
SCREAMING_SNAKE_CASE : Dict = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
SCREAMING_SNAKE_CASE : List[Any] = 2_18_41
SCREAMING_SNAKE_CASE : List[Any] = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''imagenet-22k-id2label.json'''
SCREAMING_SNAKE_CASE : Dict = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : Optional[int] = {int(a_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : List[str] = idalabel
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()}
else:
SCREAMING_SNAKE_CASE : Any = 10_00
SCREAMING_SNAKE_CASE : Dict = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : int = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : Tuple = {int(a_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel
SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[int] = img_size
SCREAMING_SNAKE_CASE : List[Any] = num_classes
SCREAMING_SNAKE_CASE : Dict = embed_dim
SCREAMING_SNAKE_CASE : Optional[int] = depths
SCREAMING_SNAKE_CASE : Optional[int] = num_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = window_size
return config
def __A ( a_ : List[str] )-> Tuple:
'''simple docstring'''
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE : str = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
SCREAMING_SNAKE_CASE : List[str] = '''encoder.''' + name
if "attn.proj" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
SCREAMING_SNAKE_CASE : int = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
SCREAMING_SNAKE_CASE : List[Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE : List[Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace('''mlp.fc2''' , '''output.dense''' )
if "q_bias" in name:
SCREAMING_SNAKE_CASE : int = name.replace('''q_bias''' , '''query.bias''' )
if "k_bias" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace('''k_bias''' , '''key.bias''' )
if "v_bias" in name:
SCREAMING_SNAKE_CASE : str = name.replace('''v_bias''' , '''value.bias''' )
if "cpb_mlp" in name:
SCREAMING_SNAKE_CASE : Any = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
if name == "norm.weight":
SCREAMING_SNAKE_CASE : List[Any] = '''layernorm.weight'''
if name == "norm.bias":
SCREAMING_SNAKE_CASE : Tuple = '''layernorm.bias'''
if "head" in name:
SCREAMING_SNAKE_CASE : Any = name.replace('''head''' , '''classifier''' )
else:
SCREAMING_SNAKE_CASE : Tuple = '''swinv2.''' + name
return name
def __A ( a_ : Any , a_ : Tuple )-> str:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = orig_state_dict.pop(a_ )
if "mask" in key:
continue
elif "qkv" in key:
SCREAMING_SNAKE_CASE : str = key.split('''.''' )
SCREAMING_SNAKE_CASE : Optional[int] = int(key_split[1] )
SCREAMING_SNAKE_CASE : str = int(key_split[3] )
SCREAMING_SNAKE_CASE : Optional[Any] = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
SCREAMING_SNAKE_CASE : List[Any] = val[:dim, :]
SCREAMING_SNAKE_CASE : Optional[int] = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE : List[Any] = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE : int = val[:dim]
                SCREAMING_SNAKE_CASE : str = val[dim : dim * 2]
SCREAMING_SNAKE_CASE : int = val[-dim:]
else:
SCREAMING_SNAKE_CASE : Tuple = val
return orig_state_dict
def __A ( a_ : Optional[int] , a_ : Dict )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = timm.create_model(a_ , pretrained=a_ )
timm_model.eval()
SCREAMING_SNAKE_CASE : List[Any] = get_swinva_config(a_ )
SCREAMING_SNAKE_CASE : int = SwinvaForImageClassification(a_ )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = convert_state_dict(timm_model.state_dict() , a_ )
model.load_state_dict(a_ )
SCREAMING_SNAKE_CASE : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' , '''-''' ) ) )
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(a_ , stream=a_ ).raw )
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=a_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Dict = timm_model(inputs['''pixel_values'''] )
SCREAMING_SNAKE_CASE : Dict = model(**a_ ).logits
assert torch.allclose(a_ , a_ , atol=1E-3 )
print(F"Saving model {swinva_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(a_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(a_ )
model.push_to_hub(
repo_path_or_name=Path(a_ , a_ ) , organization='''nandwalritik''' , commit_message='''Add model''' , )
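# --- Illustrative sketch (added; a miniature of the slicing, not the converter itself) ---
# The "qkv" branch above slices timm's fused attention weight of shape (3 * dim, dim)
# into equal query/key/value blocks:
def _demo_split_qkv(dim: int = 4):
    qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
    return q, k, v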
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swinv2_name",
default="swinv2_tiny_patch4_window8_256",
type=str,
help="Name of the Swinv2 timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCamelCase__ : List[Any] = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 18 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any=13 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :int=[1, 2, 1] , lowerCamelCase_ :str=[2, 2, 4] , lowerCamelCase_ :str=2 , lowerCamelCase_ :Tuple=2.0 , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :Optional[int]=0.0 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :str=False , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :List[Any]=1E-5 , lowerCamelCase_ :int=True , lowerCamelCase_ :str=None , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Union[str, Any]=10 , lowerCamelCase_ :List[Any]=8 , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Any = embed_dim
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : List[str] = num_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = window_size
SCREAMING_SNAKE_CASE : Optional[Any] = mlp_ratio
SCREAMING_SNAKE_CASE : List[Any] = qkv_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = drop_path_rate
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Any = patch_norm
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : List[Any] = scope
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_stride
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
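# Worked example with the tester defaults (image_size=32, patch_size=2, depths of
# length 3, embed_dim=16): (32 // 2) ** 2 = 256 patches, divided by 4 ** 2 = 16
# after two merge stages, so expected_seq_len = 16 and expected_dim = 16 * 4 = 64.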
def __lowerCAmelCase ( self :str , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : List[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SwinvaModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowerCamelCase_ , embed_dim=37 )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
SCREAMING_SNAKE_CASE : Tuple = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Optional[int] = config.window_size**2
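# With the tester default window_size=2, window_size_squared is 4, so each per-head
# attention map checked below is 4 x 4.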
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
SCREAMING_SNAKE_CASE : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
SCREAMING_SNAKE_CASE : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# Swinv2 has a different seq_length
SCREAMING_SNAKE_CASE : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
SCREAMING_SNAKE_CASE : Any = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = reshaped_hidden_states[0].shape
SCREAMING_SNAKE_CASE : Optional[int] = (
reshaped_hidden_states[0].view(lowerCamelCase_ , lowerCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
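# Worked example with the tester defaults (image_size=32, patch_size=2): the padded
# size is 32 + 2 - (32 % 2) = 34 on each side, i.e. the formula always adds up to a
# full extra patch, which exercises a non-default input resolution.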
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Tuple = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 18 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ : Any = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : str = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
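# Note: with this lazy structure, importing the package is cheap; torch-backed
# symbols such as BioGptModel are only resolved (and torch actually imported)
# when the attribute is first accessed on the _LazyModule instance.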
| 18 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """blenderbot-small"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :Any , lowerCamelCase_ :Dict=5_02_65 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :Tuple=8 , lowerCamelCase_ :int=20_48 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=8 , lowerCamelCase_ :str=20_48 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :Tuple=0.0 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Dict=False , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=2 , **lowerCamelCase_ :Dict , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = d_model
SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Tuple = encoder_layers
SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Any = activation_dropout
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , forced_eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
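# Minimal usage sketch (illustrative; written against the upstream name
# BlenderbotSmallConfig rather than the obfuscated local class name):
#   config = BlenderbotSmallConfig(d_model=256, encoder_layers=2, decoder_layers=2)
#   config.hidden_size == config.d_model  # True, via the attribute_map alias above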
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch'''}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __lowerCAmelCase ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Any = super().outputs
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : str = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __lowerCAmelCase ( self :int , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE : Optional[int] = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE : str = dict(**lowerCamelCase_ , **lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = common_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE : str = common_inputs['''decoder_input_ids'''].shape[1]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.num_attention_heads
SCREAMING_SNAKE_CASE : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE : List[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE : int = min(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = max(lowerCamelCase_ , lowerCamelCase_ ) - min_num_layers
SCREAMING_SNAKE_CASE : Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowerCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE : int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowerCamelCase_ , lowerCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) )
return common_inputs
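# Worked shape example (not a test) with the config defaults above: d_model=512 and
# 16 attention heads give a head dim of 32; with the ONNX fixed batch of 2 and fixed
# sequence of 8, each encoder past tensor is (2, 16, 8, 32), and since the decoder
# sees one token plus 3 past positions, each decoder past tensor is (2, 16, 4, 32).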
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : List[str] = seqlen + 2
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.num_layers
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Tuple = common_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE : Any = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ , dtype=lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(lowerCamelCase_ )
]
return common_inputs
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : int = tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE : Any = dict(tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ ) )
return common_inputs
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
return common_inputs
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Optional[Any] = super()._flatten_past_key_values_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self )._flatten_past_key_values_(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
| 18 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """levit"""
def __init__( self :Optional[Any] , lowerCamelCase_ :int=2_24 , lowerCamelCase_ :Union[str, Any]=3 , lowerCamelCase_ :Tuple=3 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Optional[int]=1 , lowerCamelCase_ :Dict=16 , lowerCamelCase_ :Dict=[1_28, 2_56, 3_84] , lowerCamelCase_ :Any=[4, 8, 12] , lowerCamelCase_ :List[Any]=[4, 4, 4] , lowerCamelCase_ :Union[str, Any]=[16, 16, 16] , lowerCamelCase_ :Union[str, Any]=0 , lowerCamelCase_ :Tuple=[2, 2, 2] , lowerCamelCase_ :Tuple=[2, 2, 2] , lowerCamelCase_ :List[str]=0.0_2 , **lowerCamelCase_ :int , ) -> Any:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = image_size
SCREAMING_SNAKE_CASE : List[str] = num_channels
SCREAMING_SNAKE_CASE : Any = kernel_size
SCREAMING_SNAKE_CASE : Tuple = stride
SCREAMING_SNAKE_CASE : Dict = padding
SCREAMING_SNAKE_CASE : int = hidden_sizes
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = depths
SCREAMING_SNAKE_CASE : List[Any] = key_dim
SCREAMING_SNAKE_CASE : Optional[int] = drop_path_rate
SCREAMING_SNAKE_CASE : Tuple = patch_size
SCREAMING_SNAKE_CASE : List[str] = attention_ratio
SCREAMING_SNAKE_CASE : Optional[Any] = mlp_ratio
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : List[str] = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
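# Each entry is read by the modeling code roughly as
# (op, key_dim, num_heads, attention_ratio, mlp_ratio, stride): a "Subsample"
# attention block between stages with num_heads = hidden_size // key_dim and
# stride 2. This field layout is an inference from the LeViT model, stated here
# as an assumption rather than a documented contract.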
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.11""" )
@property
def __lowerCAmelCase ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCAmelCase ( self :str ) -> float:
'''simple docstring'''
return 1E-4
| 18 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """layoutlmv3"""
def __init__( self :str , lowerCamelCase_ :Optional[Any]=5_02_65 , lowerCamelCase_ :Dict=7_68 , lowerCamelCase_ :Union[str, Any]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Union[str, Any]=30_72 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Any=5_12 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=1E-5 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :int=0 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :List[str]=10_24 , lowerCamelCase_ :Tuple=1_28 , lowerCamelCase_ :Any=1_28 , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :str=32 , lowerCamelCase_ :int=1_28 , lowerCamelCase_ :int=64 , lowerCamelCase_ :List[Any]=2_56 , lowerCamelCase_ :Any=True , lowerCamelCase_ :str=True , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :List[str]=2_24 , lowerCamelCase_ :Dict=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :Any=None , **lowerCamelCase_ :Optional[Any] , ) -> int:
'''simple docstring'''
super().__init__(
vocab_size=lowerCamelCase_ , hidden_size=lowerCamelCase_ , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , intermediate_size=lowerCamelCase_ , hidden_act=lowerCamelCase_ , hidden_dropout_prob=lowerCamelCase_ , attention_probs_dropout_prob=lowerCamelCase_ , max_position_embeddings=lowerCamelCase_ , type_vocab_size=lowerCamelCase_ , initializer_range=lowerCamelCase_ , layer_norm_eps=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : Tuple = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : int = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Dict = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Optional[int] = text_embed
SCREAMING_SNAKE_CASE : Any = visual_embed
SCREAMING_SNAKE_CASE : Any = input_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : str = classifier_dropout
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.12""" )
@property
def __lowerCAmelCase ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __lowerCAmelCase ( self :Optional[int] ) -> float:
'''simple docstring'''
return 1E-5
@property
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
return 12
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :"ProcessorMixin" , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional["TensorType"] = None , lowerCamelCase_ :int = 3 , lowerCamelCase_ :int = 40 , lowerCamelCase_ :int = 40 , ) -> Mapping[str, Any]:
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Dict = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : int = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = dict(
processor(
lowerCamelCase_ , text=lowerCamelCase_ , boxes=lowerCamelCase_ , return_tensors=lowerCamelCase_ , ) )
return inputs
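# Rough shape expectations (hedged; exact keys and padding depend on the processor):
# with the defaults above this yields batch 2, so input_ids and attention_mask are
# (2, seq), bbox is (2, seq, 4), and pixel_values is (2, 3, 224, 224).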
| 18 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 10
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [1, 2, 3, 4]
SCREAMING_SNAKE_CASE : int = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowerCamelCase_ , self.block_size , 0 ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
SCREAMING_SNAKE_CASE : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowerCamelCase_ , self.block_size , 0 ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
SCREAMING_SNAKE_CASE : Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowerCamelCase_ , self.block_size , 0 ) , lowerCamelCase_ )
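# A plausible reconstruction of the helper under test, consistent with the three
# cases above (pad short inputs with the pad id, truncate long ones). Illustrative
# only; the shipped utils_summarization implementation may differ:
#   def truncate_or_pad(sequence, block_size, pad_token_id):
#       if len(sequence) > block_size:
#           return sequence[:block_size]
#       return sequence + [pad_token_id] * (block_size - len(sequence))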
def __lowerCAmelCase ( self :int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = process_story(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , [] )
def __lowerCAmelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = process_story(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , [] )
self.assertEqual(lowerCamelCase_ , [] )
def __lowerCAmelCase ( self :Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = process_story(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = ['''It was the best of times.''']
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([1, 2, 3, 4] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowerCamelCase_ , 0 ).numpy() , expected.numpy() )
def __lowerCAmelCase ( self :Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowerCamelCase_ , 23 ).numpy() , expected.numpy() )
def __lowerCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowerCamelCase_ , 1 ).numpy() , expected.numpy() )
def __lowerCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 1_01
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
SCREAMING_SNAKE_CASE : Optional[Any] = compute_token_type_ids(lowerCamelCase_ , lowerCamelCase_ )
np.testing.assert_array_equal(lowerCamelCase_ , lowerCamelCase_ )
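def _reference_build_mask(sequence, pad_token_id):
    # Illustrative reconstruction consistent with the assertions above: 1 for real
    # tokens, 0 wherever the pad token appears. A readability sketch, not the
    # shipped utils_summarization implementation (relies on the torch import above).
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0
    return mask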
| 18 |
"""simple docstring"""
import math
def jump_search(arr: list, x: int) -> int:
    '''Search sorted `arr` for `x` by jumping in sqrt(n)-sized blocks; return its index, or -1 if absent.'''
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
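# Worked trace: for arr = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] and x = 55, n = 11
# and the block size is floor(sqrt(11)) = 3. The jump loop probes indices 2, 5, 8,
# then min(12, 11) - 1 = 10 where arr[10] = 55 stops it; the linear scan from
# prev = 9 then returns index 10. Overall cost is O(sqrt(n)) comparisons.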
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f'''Number {x} is at index {res}''')
| 18 | 1 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ : Tuple = "▁"
lowerCamelCase__ : str = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = BigBirdTokenizer
UpperCamelCase = BigBirdTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''<s>'''
SCREAMING_SNAKE_CASE : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(lowerCamelCase_ ) , 10_04 )
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def __lowerCAmelCase ( self :Dict ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : str = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = BigBirdTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [2_85, 46, 10, 1_70, 3_82] , )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = '''Hello World!'''
SCREAMING_SNAKE_CASE : Optional[Any] = [65, 1_85_36, 22_60, 1_01, 66]
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def __lowerCAmelCase ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
SCREAMING_SNAKE_CASE : Any = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@require_torch
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
SCREAMING_SNAKE_CASE : int = list(self.big_tokenizer.get_vocab().keys() )[:10]
SCREAMING_SNAKE_CASE : Dict = ''' '''.join(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = self.big_tokenizer.encode_plus(lowerCamelCase_ , return_tensors='''pt''' , return_token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = BigBirdConfig(attention_type='''original_full''' )
SCREAMING_SNAKE_CASE : Dict = BigBirdModel(lowerCamelCase_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase_ )
model(**lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = {'''input_ids''': [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 18 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowerCamelCase__ : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCamelCase__ : str = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowerCamelCase__ : int = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ):
        '''simple docstring'''
        score = fa_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 18 | 1 |
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __A ( a_ : int )-> Optional[Any]: # picklable for multiprocessing
'''simple docstring'''
return x.sum()
def __A ( a_ : Union[str, Any] )-> List[Any]: # picklable for multiprocessing
'''simple docstring'''
return i + 1
@dataclass
class A:
    '''simple docstring'''
    x : int
    y : str
class lowercase__( TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = {}
SCREAMING_SNAKE_CASE : Tuple = []
SCREAMING_SNAKE_CASE : List[Any] = 1
SCREAMING_SNAKE_CASE : List[Any] = [1, 2]
SCREAMING_SNAKE_CASE : Optional[int] = {'''a''': 1, '''b''': 2}
SCREAMING_SNAKE_CASE : Optional[int] = {'''a''': [1, 2], '''b''': [3, 4]}
SCREAMING_SNAKE_CASE : Dict = {'''a''': {'''1''': 1}, '''b''': 2}
SCREAMING_SNAKE_CASE : Any = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
SCREAMING_SNAKE_CASE : int = {}
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Tuple = 2
SCREAMING_SNAKE_CASE : Tuple = [2, 3]
SCREAMING_SNAKE_CASE : str = {'''a''': 2, '''b''': 3}
SCREAMING_SNAKE_CASE : List[Any] = {'''a''': [2, 3], '''b''': [4, 5]}
SCREAMING_SNAKE_CASE : Optional[int] = {'''a''': {'''1''': 2}, '''b''': 3}
SCREAMING_SNAKE_CASE : Optional[int] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = 2
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
SCREAMING_SNAKE_CASE : Optional[Any] = {'''a''': 2, '''b''': 0, '''c''': 2}
        expected_map_nested_sna_int = {
'''a''': np.eye(2 ).astype(lowerCamelCase_ ),
'''b''': np.zeros(3 ).astype(lowerCamelCase_ ),
'''c''': np.ones(2 ).astype(lowerCamelCase_ ),
}
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , map_numpy=lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(lowerCamelCase_ , lowerCamelCase_ , map_numpy=lowerCamelCase_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(lowerCamelCase_ , lowerCamelCase_ , map_numpy=lowerCamelCase_ , num_proc=lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(lowerCamelCase_ , lowerCamelCase_ , map_numpy=lowerCamelCase_ , num_proc=lowerCamelCase_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(ValueError ):  # can't pickle a local lambda
            map_nested(lambda lowerCamelCase_ : lowerCamelCase_ + 1 , lowerCamelCase_ , num_proc=lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = {'''a''': 1, '''b''': 2}
SCREAMING_SNAKE_CASE : Optional[int] = {'''a''': 3, '''b''': 4}
SCREAMING_SNAKE_CASE : Tuple = {'''a''': 5, '''b''': 6}
SCREAMING_SNAKE_CASE : Union[str, Any] = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> str:
'''simple docstring'''
        class Foo:
            '''simple docstring'''
            my_attr = """bar"""
        foo = Foo()
self.assertEqual(foo.my_attr , '''bar''' )
with temporary_assignment(lowerCamelCase_ , '''my_attr''' , '''BAR''' ):
self.assertEqual(foo.my_attr , '''BAR''' )
self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def __A ( iterable_length , num_proc , expected_num_proc ):
    '''simple docstring'''
    with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
        '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
        SCREAMING_SNAKE_CASE : Union[str, Any] = {F"{i}": i for i in range(iterable_length )}
        SCREAMING_SNAKE_CASE : Union[str, Any] = map_nested(lambda a_ : a_ + 10 , SCREAMING_SNAKE_CASE , num_proc=num_proc , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
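# Note on the parametrization above (a sketch of the dispatch rule, inferred from the
# mocks): map_nested() only spawns a multiprocessing Pool when the iterable has at
# least `parallel_min_length` items (16 here) AND num_proc > 1; otherwise it falls
# back to _single_map_nested. E.g. (2, 3, 1): two items is below the threshold, so the
# single-process path runs; (17, 16, 16): the pool is created with 16 processes.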
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@require_tf
def __lowerCAmelCase ( self :Dict ) -> str:
'''simple docstring'''
import tensorflow as tf
from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()
        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@require_torch
def __lowerCAmelCase ( self :str ) -> int:
'''simple docstring'''
import torch
def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
def __lowerCAmelCase ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
def gen_random_output():
return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize('''input_data''' , [{}] )
def __A ( input_data ):
'''simple docstring'''
    output_data = NestedDataStructure(input_data ).data
assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def __A ( data , expected_output ):
'''simple docstring'''
    output = NestedDataStructure(data ).flatten()
assert output == expected_output
def __A ( )-> Any:
'''simple docstring'''
    input_obj = A(x=1 , y='''foobar''' )
    expected_output = {'''x''': 1, '''y''': '''foobar'''}
    assert asdict(input_obj ) == expected_output
    input_obj = {'''a''': {'''b''': A(x=10 , y='''foo''' )}, '''c''': [A(x=20 , y='''bar''' )]}
    expected_output = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
    assert asdict(input_obj ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y='''foo''' )] )
def _split_text( text ):
'''simple docstring'''
return text.split()
def _aseconds_generator_of_aitems_with_timing( content ):
'''simple docstring'''
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __A ( )-> Dict:
'''simple docstring'''
with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(a_ ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(a_ ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it is yielded"
            out.append(content )
assert out.count('''a''' ) == 2
assert out.count('''b''' ) == 2
assert len(a_ ) == 4
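# Sketch of the contract the test above relies on (names from datasets.utils.py_utils):
# iflatmap_unordered(pool, fn, kwargs_iterable=...) applies `fn` to each kwargs dict,
# flattens every generator it returns, and yields results as soon as they are ready,
# in no guaranteed order -- hence the count-based assertions instead of list equality.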
| 18 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling( num : int , den : int )-> bool:
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def fraction_list( digit_len : int )-> list[str]:
    '''simple docstring'''
    solutions = []
    den = 11
    last_digit = int('''1''' + '''0''' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(F"{num}/{den}" )
            den += 1
        num += 1
        den = 10
    return solutions
def solution( digit_len : int = 2 )-> int:
    '''simple docstring'''
    result = 1.0
    for fraction in fraction_list(digit_len ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
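# Worked example (Project Euler 33): the four non-trivial digit-cancelling fractions
# are 16/64, 19/95, 26/65 and 49/98; their product is 1/100, so `solution()` returns
# the denominator of the product in lowest terms and the script prints 100.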
| 18 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def __A ( a_ : float , a_ : float )-> tuple:
'''simple docstring'''
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
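# The function above implements the resonant frequency of an ideal LC circuit:
#     f0 = 1 / (2 * pi * sqrt(L * C))
# Quick sanity check (values are illustrative): L = 10 mH and C = 1 uF give
#     f0 = 1 / (2 * pi * sqrt(0.01 * 1e-6)) ~= 1591.5 Hz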
| 18 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : int = logging.get_logger(__name__)
class lowercase__( BackboneConfigMixin , PretrainedConfig ):
'''simple docstring'''
    model_type = """maskformer-swin"""
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , image_size=2_24 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.0_2 , layer_norm_eps=1E-5 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
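# Instantiation sketch (hypothetical values mirroring the defaults above): with
# embed_dim=96 and depths=[2, 2, 6, 2], the derived hidden_size is
# 96 * 2 ** (4 - 1) == 768, and stage_names becomes
# ['stem', 'stage1', 'stage2', 'stage3', 'stage4'].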
| 18 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase__( SchedulerCommonTest ):
'''simple docstring'''
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config( self , **kwargs ):
'''simple docstring'''
        config = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf''' ),
'''variance_type''': None,
}
        config.update(**kwargs )
return config
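    # Any keyword passed to get_scheduler_config() overrides the defaults above, e.g.
    # (illustrative): self.get_scheduler_config(solver_order=3, prediction_type="sample")
    # returns the same dict with only those two entries changed.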
    def check_over_configs( self , time_step=0 , **config ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(output , t , sample , **kwargs ).prev_sample
                new_output = new_scheduler.step(new_output , t , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self :Optional[Any] ) -> List[Any]:
'''simple docstring'''
pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(sample , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(sample , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ):
        '''simple docstring'''
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
def __lowerCAmelCase ( self :int ) -> str:
'''simple docstring'''
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:] ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_5_7_4 ) < 1E-3
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
def __lowerCAmelCase ( self :Dict ) -> Optional[int]:
'''simple docstring'''
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
        self.check_over_configs(thresholding=False )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
                        thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type='''dpmsolver++''' , solver_order=order , solver_type=solver_type , )
def __lowerCAmelCase ( self :Any ) -> Tuple:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def __lowerCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        sample = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"
def __lowerCAmelCase ( self :Any ) -> Tuple:
'''simple docstring'''
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def __lowerCAmelCase ( self :List[str] ) -> Any:
'''simple docstring'''
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
        self.check_over_configs(variance_type=None )
self.check_over_configs(variance_type='''learned_range''' )
def __lowerCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
def __lowerCAmelCase ( self :str ) -> str:
'''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
def __lowerCAmelCase ( self :List[Any] ) -> str:
'''simple docstring'''
        sample = self.full_loop(use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2_2_4_8 ) < 1E-3
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
        sample = self.full_loop(prediction_type='''v_prediction''' )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.1_4_5_3 ) < 1E-3
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
        sample = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.0_6_4_9 ) < 1E-3
def __lowerCAmelCase ( self :Tuple ) -> List[Any]:
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
| 18 |
"""simple docstring"""
import math
class Graph:
    '''simple docstring'''
    def __init__( self , n=0 ): # a graph with Node 0,1,...,N-1
        '''simple docstring'''
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ] # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ] # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u , v , w ):
        '''simple docstring'''
        self.dp[u][v] = w
    def floyd_warshall( self ):
        '''simple docstring'''
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ):
        '''simple docstring'''
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
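# Expected results for the example graph above (hand-checked): the shortest path
# 1 -> 3 -> 4 costs 5 + 6 = 11, and 0 -> 2 -> 3 costs 9 + 7 = 16, so
# graph.show_min(1, 4) == 11 and graph.show_min(0, 3) == 16 (the calls above discard
# the return values; wrap them in print() to display them).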
| 18 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowercase__( PretrainedConfig ):
'''simple docstring'''
    model_type = """megatron-bert"""
    def __init__( self , vocab_size=2_90_56 , hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=40_96 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
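# Usage sketch (illustrative): the defaults above match the 345M-parameter
# Megatron-BERT geometry (hidden_size=1024, 24 layers, 16 heads). A smaller
# hypothetical variant:
#   config = lowercase__(hidden_size=512, num_hidden_layers=8, num_attention_heads=8)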
| 18 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
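# The pattern above is the standard transformers lazy-import scheme: at type-checking
# time the real symbols are imported so IDEs can resolve them, while at runtime the
# module object is replaced in sys.modules by a _LazyModule that imports a submodule
# only when one of the names declared in _import_structure is first accessed.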
| 18 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
'''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    '''simple docstring'''
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = ViTConfig()
SCREAMING_SNAKE_CASE : int = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Optional[Any] = int(vit_name[-12:-10] )
SCREAMING_SNAKE_CASE : Any = int(vit_name[-9:-6] )
else:
SCREAMING_SNAKE_CASE : Optional[int] = 10_00
SCREAMING_SNAKE_CASE : Union[str, Any] = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Tuple = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Any = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : Optional[Any] = {int(a_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[int] = idalabel
SCREAMING_SNAKE_CASE : Optional[Any] = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : List[Any] = int(vit_name[-6:-4] )
SCREAMING_SNAKE_CASE : Optional[int] = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
SCREAMING_SNAKE_CASE : List[str] = 1_92
SCREAMING_SNAKE_CASE : str = 7_68
SCREAMING_SNAKE_CASE : str = 12
SCREAMING_SNAKE_CASE : str = 3
elif vit_name[9:].startswith('''small''' ):
SCREAMING_SNAKE_CASE : List[Any] = 3_84
SCREAMING_SNAKE_CASE : Tuple = 15_36
SCREAMING_SNAKE_CASE : Dict = 12
SCREAMING_SNAKE_CASE : Union[str, Any] = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = 7_68
SCREAMING_SNAKE_CASE : Tuple = 23_04
SCREAMING_SNAKE_CASE : str = 8
SCREAMING_SNAKE_CASE : str = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
SCREAMING_SNAKE_CASE : Any = 10_24
SCREAMING_SNAKE_CASE : List[str] = 40_96
SCREAMING_SNAKE_CASE : int = 24
SCREAMING_SNAKE_CASE : Union[str, Any] = 16
elif vit_name[4:].startswith('''huge''' ):
SCREAMING_SNAKE_CASE : Optional[int] = 12_80
SCREAMING_SNAKE_CASE : List[Any] = 51_20
SCREAMING_SNAKE_CASE : Tuple = 32
SCREAMING_SNAKE_CASE : Tuple = 16
# load original model from timm
SCREAMING_SNAKE_CASE : Tuple = timm.create_model(a_ , pretrained=a_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE : Union[str, Any] = timm_model.state_dict()
if base_model:
remove_classification_head_(a_ )
SCREAMING_SNAKE_CASE : str = create_rename_keys(a_ , a_ )
for src, dest in rename_keys:
rename_key(a_ , a_ , a_ )
read_in_q_k_v(a_ , a_ , a_ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTModel(a_ ).eval()
else:
SCREAMING_SNAKE_CASE : int = ViTForImageClassification(a_ ).eval()
model.load_state_dict(a_ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
SCREAMING_SNAKE_CASE : Dict = DeiTImageProcessor(size=config.image_size )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE : int = image_processor(images=prepare_img() , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Optional[Any] = encoding['''pixel_values''']
SCREAMING_SNAKE_CASE : Optional[int] = model(a_ )
if base_model:
SCREAMING_SNAKE_CASE : Union[str, Any] = timm_model.forward_features(a_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(a_ , outputs.pooler_output , atol=1E-3 )
else:
SCREAMING_SNAKE_CASE : Any = timm_model(a_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a_ , outputs.logits , atol=1E-3 )
Path(a_ ).mkdir(exist_ok=a_ )
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(a_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
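# Example invocation (the script file name and output path are placeholders):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224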
| 18 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class lowercase__( ModelMixin ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = nn.ModuleList(lowerCamelCase_ )
    def forward( self , sample :torch.FloatTensor , timestep :Union[torch.Tensor, float, int] , encoder_hidden_states :torch.Tensor , controlnet_cond :List[torch.tensor] , conditioning_scale :List[float] , class_labels :Optional[torch.Tensor] = None , timestep_cond :Optional[torch.Tensor] = None , attention_mask :Optional[torch.Tensor] = None , cross_attention_kwargs :Optional[Dict[str, Any]] = None , guess_mode :bool = False , return_dict :bool = True , ) -> Union[ControlNetOutput, Tuple]:
        '''simple docstring'''
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples, mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained( self , save_directory :Union[str, os.PathLike] , is_main_process :bool = True , save_function :Callable = None , safe_serialization :bool = False , variant :Optional[str] = None , ):
        '''simple docstring'''
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"
@classmethod
    def from_pretrained( cls , pretrained_model_path :Optional[Union[str, os.PathLike]] , **kwargs ):
        '''simple docstring'''
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets )} controlnets loaded from {pretrained_model_path}." )
        if len(controlnets ) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}." )
        return cls(controlnets )
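# Usage sketch (hypothetical checkpoints): wrap several ControlNets and call the
# wrapper once per denoising step; the residuals of all nets are summed in forward().
#
#   controlnet = lowercase__([
#       ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny"),
#       ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth"),
#   ])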
| 18 | 1 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def __A ( )-> Tuple:
'''simple docstring'''
    negative_img = cn.convert_to_negative(img )
# assert negative_img array for at least one True
assert negative_img.any()
def __A ( )-> Any:
'''simple docstring'''
with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img:
# Work around assertion for response
        assert str(cc.change_contrast(img , 1_10 ) ).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''' )
def __A ( )-> Optional[int]:
'''simple docstring'''
    resp = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def __A ( )-> Optional[int]:
'''simple docstring'''
    canny_img = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
    canny_array = canny.canny(canny_img )
# assert canny array for at least one True
assert canny_array.any()
def __A ( )-> Optional[int]:
'''simple docstring'''
    assert gg.gaussian_filter(gray , 5 , sigma=0.9 ).all()
def __A ( )-> int:
'''simple docstring'''
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    res = conv.img_convolve(gray , laplace ).astype(uinta )
assert res.any()
def __A ( )-> Optional[int]:
'''simple docstring'''
    assert med.median_filter(gray , 3 ).any()
def __A ( )-> Optional[int]:
'''simple docstring'''
    grad, theta = sob.sobel_filter(gray )
assert grad.any() and theta.any()
def __A ( )-> str:
'''simple docstring'''
    sepia = sp.make_sepia(img , 20 )
assert sepia.all()
def __A ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" )-> Union[str, Any]:
'''simple docstring'''
    burkes = bs.Burkes(imread(a_ , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def __A ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , )-> List[Any]:
'''simple docstring'''
    nn = rs.NearestNeighbour(imread(a_ , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def __A ( )-> Any:
'''simple docstring'''
    file_path = '''digital_image_processing/image_data/lena.jpg'''
    # Reading the image and converting it to grayscale.
    image = imread(file_path , 0 )
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image , x_coordinate , y_coordinate , center )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
            lbp_image[i][j] = lbp.local_binary_value(image , i , j )
assert lbp_image.any()
| 18 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force( magnitude : float , angle : float , radian_mode : bool = False )-> list[float]:
    '''simple docstring'''
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium( forces : NDArray[floataa] , location : NDArray[floataa] , eps : float = 10**-1 )-> bool:
    '''simple docstring'''
    moments : NDArray[floataa] = cross(location , forces )
    sum_moments : float = sum(moments )
    return abs(sum_moments ) < eps
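# Physics note: for a planar force system, numpy's cross() of the 2-D location and
# force vectors returns the scalar z-component of each moment about the origin, so the
# check above asserts that the net moment is (numerically) zero -- the rotational half
# of the static-equilibrium conditions.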
if __name__ == "__main__":
# Test to check if it works
    forces = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
    forces = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 18 | 1 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model( pt_model , model_file ):
    '''simple docstring'''
    try:
        with open(model_file , '''rb''' ) as flax_state_f:
            flax_state = from_bytes(None , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(a_ ) as f:
if f.read().startswith('''version''' ):
raise OSError(
'''You seem to have cloned a repository without having git-lfs installed. Please'''
''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
''' folder you cloned.''' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " )
    return load_flax_weights_in_pytorch_model(pt_model , flax_state )
def load_flax_weights_in_pytorch_model( pt_model , flax_state ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda t : t.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
SCREAMING_SNAKE_CASE : Dict = ''''''
    flax_state_dict = flatten_dict(flax_state , sep='''.''' )
    pt_model_dict = pt_model.state_dict()
# keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split('''.''' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
            flax_tensor = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
flax_key_tuple_string.replace('''_0''' , '''.0''' )
.replace('''_1''' , '''.1''' )
.replace('''_2''' , '''.2''' )
.replace('''_3''' , '''.3''' )
.replace('''_4''' , '''.4''' )
.replace('''_5''' , '''.5''' )
.replace('''_6''' , '''.6''' )
.replace('''_7''' , '''.7''' )
.replace('''_8''' , '''.8''' )
.replace('''_9''' , '''.9''' )
)
        flax_key = '''.'''.join(flax_key_tuple_array )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
else:
# weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
# re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
    if len(missing_keys ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
''' use it for predictions and inference.''' )
return pt_model
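# Usage sketch (names are illustrative): given a diffusers PyTorch model instance and
# a path to a serialized Flax state:
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")
# Kernels are transposed (4-D convs to OIHW, 2-D matrices via .T) before being copied.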
| 18 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate( item : str , main_target : str )-> tuple[str, float]:
    '''simple docstring'''
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
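# Worked example for the single-point crossover above (illustrative values):
# with parent_1 = "HELLO", parent_2 = "WORLD" and random_slice = 3, the two
# children are "HEL" + "LD" = "HELLD" and "WOR" + "LO" = "WORLO".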
def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, overwrite one random gene."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed children from a scored parent; fitter parents get more children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new strings to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution loop until the population produces the target string."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is the selection step.
        for i in range(N_SELECTED):
            population.extend(select(population_score[i], population_score, genes))
            # Check if the population has already reached the maximum value and,
            # if so, break the cycle. If this check is disabled, the algorithm
            # will take forever to compute large strings, but will also compute
            # small strings in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 18 | 1 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __A ( a_ : Union[str, Any] , a_ : str=None )-> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if token is not None:
SCREAMING_SNAKE_CASE : Tuple = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"Bearer {token}"}
SCREAMING_SNAKE_CASE : Optional[int] = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
SCREAMING_SNAKE_CASE : Union[str, Any] = requests.get(a_ , headers=a_ ).json()
SCREAMING_SNAKE_CASE : List[str] = {}
try:
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
SCREAMING_SNAKE_CASE : Union[str, Any] = math.ceil((result['''total_count'''] - 1_00) / 1_00 )
for i in range(a_ ):
SCREAMING_SNAKE_CASE : int = requests.get(url + F"&page={i + 2}" , headers=a_ ).json()
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return job_links
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
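# Usage sketch for the helper above (invoked as `get_job_links` in the main
# block at the bottom of this script; the run id is hypothetical, and a token
# with actions:read scope is only needed for private repositories):
#
#   job_links = get_job_links(1234567890, token=os.environ.get("GITHUB_TOKEN"))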
def __A ( a_ : Any , a_ : Any=None )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = None
if token is not None:
SCREAMING_SNAKE_CASE : List[str] = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"Bearer {token}"}
SCREAMING_SNAKE_CASE : Any = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
SCREAMING_SNAKE_CASE : Tuple = requests.get(a_ , headers=a_ ).json()
SCREAMING_SNAKE_CASE : str = {}
try:
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
SCREAMING_SNAKE_CASE : List[str] = math.ceil((result['''total_count'''] - 1_00) / 1_00 )
for i in range(a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = requests.get(url + F"&page={i + 2}" , headers=a_ ).json()
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
return artifacts
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def __A ( a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[Any] , a_ : List[str] )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = None
if token is not None:
SCREAMING_SNAKE_CASE : Any = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"Bearer {token}"}
SCREAMING_SNAKE_CASE : Tuple = requests.get(a_ , headers=a_ , allow_redirects=a_ )
SCREAMING_SNAKE_CASE : List[Any] = result.headers['''Location''']
SCREAMING_SNAKE_CASE : Tuple = requests.get(a_ , allow_redirects=a_ )
SCREAMING_SNAKE_CASE : List[str] = os.path.join(a_ , F"{artifact_name}.zip" )
with open(a_ , '''wb''' ) as fp:
fp.write(response.content )
def __A ( a_ : List[str] , a_ : Tuple=None )-> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = []
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Tuple = None
with zipfile.ZipFile(a_ ) as z:
for filename in z.namelist():
if not os.path.isdir(a_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(a_ ) as f:
for line in f:
SCREAMING_SNAKE_CASE : str = line.decode('''UTF-8''' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
SCREAMING_SNAKE_CASE : Optional[int] = line[: line.index(''': ''' )]
SCREAMING_SNAKE_CASE : Tuple = line[line.index(''': ''' ) + len(''': ''' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ):
# `test` is the test method that failed
SCREAMING_SNAKE_CASE : Optional[int] = line[len('''FAILED ''' ) :]
failed_tests.append(a_ )
elif filename == "job_name.txt":
SCREAMING_SNAKE_CASE : List[Any] = line
if len(a_ ) != len(a_ ):
raise ValueError(
F"`errors` and `failed_tests` should have the same number of elements. Got {len(a_ )} for `errors` "
F"and {len(a_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
''' problem.''' )
SCREAMING_SNAKE_CASE : Tuple = None
if job_name and job_links:
SCREAMING_SNAKE_CASE : Tuple = job_links.get(a_ , a_ )
# A list with elements of the form (line of error, error, failed test)
SCREAMING_SNAKE_CASE : Dict = [x + [y] + [job_link] for x, y in zip(a_ , a_ )]
return result
def __A ( a_ : List[str] , a_ : Union[str, Any]=None )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : List[Any] = [os.path.join(a_ , a_ ) for p in os.listdir(a_ ) if p.endswith('''.zip''' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(a_ , job_links=a_ ) )
return errors
def __A ( a_ : Optional[Any] , a_ : Optional[Any]=None )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = Counter()
counter.update([x[1] for x in logs] )
SCREAMING_SNAKE_CASE : List[Any] = counter.most_common()
SCREAMING_SNAKE_CASE : Optional[int] = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
SCREAMING_SNAKE_CASE : Optional[Any] = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]}
    SCREAMING_SNAKE_CASE : List[Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
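# The mapping returned above looks like (illustrative values):
#   {"ImportError: No module named 'x'": {"count": 12,
#    "failed_tests": [(failed_test, error_line), ...]}}
# with entries sorted so the most frequent error comes first.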
def __A ( a_ : Dict )-> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = test.split('''::''' )[0]
if test.startswith('''tests/models/''' ):
SCREAMING_SNAKE_CASE : Tuple = test.split('''/''' )[2]
else:
SCREAMING_SNAKE_CASE : List[Any] = None
return test
def __A ( a_ : str , a_ : Optional[Any]=None )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [(x[0], x[1], get_model(x[2] )) for x in logs]
SCREAMING_SNAKE_CASE : Any = [x for x in logs if x[2] is not None]
SCREAMING_SNAKE_CASE : Dict = {x[2] for x in logs}
SCREAMING_SNAKE_CASE : List[str] = {}
for test in tests:
SCREAMING_SNAKE_CASE : Union[str, Any] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
SCREAMING_SNAKE_CASE : Tuple = counter.most_common()
SCREAMING_SNAKE_CASE : List[Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
SCREAMING_SNAKE_CASE : List[str] = sum(error_counts.values() )
if n_errors > 0:
SCREAMING_SNAKE_CASE : List[Any] = {'''count''': n_errors, '''errors''': error_counts}
    SCREAMING_SNAKE_CASE : Tuple = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def __A ( a_ : Dict )-> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = '''| no. | error | status |'''
SCREAMING_SNAKE_CASE : str = '''|-:|:-|:-|'''
SCREAMING_SNAKE_CASE : Any = [header, sep]
for error in reduced_by_error:
SCREAMING_SNAKE_CASE : List[Any] = reduced_by_error[error]['''count''']
SCREAMING_SNAKE_CASE : Dict = F"| {count} | {error[:1_00]} | |"
lines.append(a_ )
return "\n".join(a_ )
def __A ( a_ : Tuple )-> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = '''| model | no. of errors | major error | count |'''
SCREAMING_SNAKE_CASE : Optional[int] = '''|-:|-:|-:|-:|'''
SCREAMING_SNAKE_CASE : List[str] = [header, sep]
for model in reduced_by_model:
SCREAMING_SNAKE_CASE : Dict = reduced_by_model[model]['''count''']
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = list(reduced_by_model[model]['''errors'''].items() )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = F"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(a_ )
return "\n".join(a_ )
if __name__ == "__main__":
lowerCamelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
lowerCamelCase__ : Dict = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCamelCase__ : List[Any] = get_job_links(args.workflow_run_id, token=args.token)
lowerCamelCase__ : Union[str, Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCamelCase__ : Dict = k.find(" / ")
lowerCamelCase__ : Any = k[index + len(" / ") :]
lowerCamelCase__ : List[Any] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCamelCase__ : Dict = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCamelCase__ : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCamelCase__ : Optional[int] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCamelCase__ : int = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCamelCase__ : List[str] = reduce_by_error(errors)
lowerCamelCase__ : Union[str, Any] = reduce_by_model(errors)
lowerCamelCase__ : Any = make_github_table(reduced_by_error)
lowerCamelCase__ : Any = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 18 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] )-> Dict:
'''simple docstring'''
return max(metric_fn(a_ , a_ ) for gt in ground_truths )
def __A ( a_ : List[Any] , a_ : Union[str, Any] , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Optional[Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(a_ , sep='''\t''' , header=a_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : str = ast.literal_eval(a_ )
answers.append(a_ )
else:
SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(a_ , a_ ):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_ )
fa += metric_max_over_ground_truths(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = 100.0 * em / total
SCREAMING_SNAKE_CASE : Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __A ( a_ : Any , a_ : Any , a_ : List[Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = args.k
SCREAMING_SNAKE_CASE : Tuple = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = 0
for hypo, reference in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __A ( a_ : Any , a_ : List[str] , a_ : str )-> int:
'''simple docstring'''
def strip_title(a_ : Optional[Any] ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE : Tuple = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE : Any = rag_model.rag.question_encoder(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever(
a_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Dict = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : List[Any] = [strip_title(a_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_ ) )
return provenance_strings
def __A ( a_ : List[Any] , a_ : int , a_ : str )-> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : Dict = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Any = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Tuple = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
if args.print_predictions:
for q, a in zip(a_ , a_ ):
logger.info('''Q: {} - A: {}'''.format(a_ , a_ ) )
return answers
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file:\n'''
            '''qa - a single line in the following format: question [tab] answer_list\n'''
            '''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_ )
SCREAMING_SNAKE_CASE : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(a_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(a_ , **a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a_ , retriever=a_ , **a_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(a_ , **a_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE : Dict = []
for line in tqdm(a_ ):
questions.append(line.strip() )
if len(a_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) )
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = get_args()
main(args)
| 18 | 1 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __A ( a_ : str , a_ : str , a_ : Optional[str] = None )-> str:
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
# old versions of hfh don't url-encode the file path
SCREAMING_SNAKE_CASE : List[Any] = quote(a_ )
return hfh.hf_hub_url(a_ , a_ , repo_type='''dataset''' , revision=a_ )
| 18 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {"vocab_file": "vocab.json"}
lowerCamelCase__ : Dict = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCamelCase__ : Optional[Any] = {"mgp-str": 27}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[int]="[GO]" , lowerCamelCase_ :int="[GO]" , lowerCamelCase_ :str="[s]" , lowerCamelCase_ :Dict="[GO]" , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
super().__init__(
unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : int = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in self.vocab.items()}
@property
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
return len(self.vocab )
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
    def __lowerCAmelCase ( self :List[str] , text :Optional[Any] ) -> List[str]:
        '''Tokenize a string into single characters.'''
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCamelCase_ ) )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' )
return (vocab_file,)
| 18 | 1 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCamelCase__ : str = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""input_values""", """attention_mask"""]
def __init__( self :List[Any] , lowerCamelCase_ :int = 1 , lowerCamelCase_ :int = 1_60_00 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :bool = False , lowerCamelCase_ :int = 80 , lowerCamelCase_ :int = 16 , lowerCamelCase_ :int = 64 , lowerCamelCase_ :str = "hann_window" , lowerCamelCase_ :float = 1.0 , lowerCamelCase_ :float = 80 , lowerCamelCase_ :float = 76_00 , lowerCamelCase_ :float = 1E-10 , lowerCamelCase_ :int = 2 , lowerCamelCase_ :bool = True , **lowerCamelCase_ :Dict , ) -> Dict:
'''simple docstring'''
super().__init__(feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = do_normalize
SCREAMING_SNAKE_CASE : int = return_attention_mask
SCREAMING_SNAKE_CASE : Tuple = num_mel_bins
SCREAMING_SNAKE_CASE : Tuple = hop_length
SCREAMING_SNAKE_CASE : str = win_length
SCREAMING_SNAKE_CASE : Any = win_function
SCREAMING_SNAKE_CASE : int = frame_signal_scale
SCREAMING_SNAKE_CASE : List[Any] = fmin
SCREAMING_SNAKE_CASE : Optional[int] = fmax
SCREAMING_SNAKE_CASE : Any = mel_floor
SCREAMING_SNAKE_CASE : Optional[Any] = reduction_factor
SCREAMING_SNAKE_CASE : Dict = win_length * sampling_rate // 10_00
SCREAMING_SNAKE_CASE : List[str] = hop_length * sampling_rate // 10_00
SCREAMING_SNAKE_CASE : str = optimal_fft_length(self.sample_size )
SCREAMING_SNAKE_CASE : Optional[int] = (self.n_fft // 2) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
if frame_signal_scale != 1.0:
warnings.warn(
'''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , lowerCamelCase_ , )
if reduction_factor != 2.0:
warnings.warn(
'''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , lowerCamelCase_ , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __lowerCAmelCase ( lowerCamelCase_ :List[np.ndarray] , lowerCamelCase_ :List[np.ndarray] , lowerCamelCase_ :float = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
SCREAMING_SNAKE_CASE : Any = np.array(lowerCamelCase_ , np.intaa )
SCREAMING_SNAKE_CASE : Optional[Any] = []
for vector, length in zip(lowerCamelCase_ , attention_mask.sum(-1 ) ):
SCREAMING_SNAKE_CASE : Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
SCREAMING_SNAKE_CASE : str = padding_value
normed_input_values.append(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
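    # The normalization above computes (x - mean) / sqrt(var + 1e-7) per example;
    # when an attention mask is given, the statistics come from the unpadded
    # prefix only, and padded positions are overwritten with padding_value.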
def __lowerCAmelCase ( self :str , lowerCamelCase_ :np.ndarray , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = spectrogram(
lowerCamelCase_ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
return log_mel_spec.T
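    # The log-mel spectrogram is returned transposed to (num_frames, num_mel_bins)
    # so that time is the leading axis, which is what the pad() call in
    # _process_audio expects.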
def __call__( self :Optional[int] , lowerCamelCase_ :Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , lowerCamelCase_ :Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , lowerCamelCase_ :Union[bool, str, PaddingStrategy] = False , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[bool] = None , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , lowerCamelCase_ :Optional[int] = None , **lowerCamelCase_ :List[Any] , ) -> BatchFeature:
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if audio is not None:
SCREAMING_SNAKE_CASE : int = self._process_audio(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ , )
else:
SCREAMING_SNAKE_CASE : int = None
if audio_target is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self._process_audio(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ , )
if inputs is None:
return inputs_target
else:
SCREAMING_SNAKE_CASE : Tuple = inputs_target['''input_values''']
SCREAMING_SNAKE_CASE : Any = inputs_target.get('''attention_mask''' )
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_attention_mask
return inputs
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase_ :bool = False , lowerCamelCase_ :Union[bool, str, PaddingStrategy] = False , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[bool] = None , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , **lowerCamelCase_ :Optional[int] , ) -> BatchFeature:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = isinstance(lowerCamelCase_ , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE : Tuple = is_batched_numpy or (
isinstance(lowerCamelCase_ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : int = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray ):
SCREAMING_SNAKE_CASE : List[str] = np.asarray(lowerCamelCase_ , dtype=np.floataa )
elif isinstance(lowerCamelCase_ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Optional[int] = speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Any = [speech]
# needed to make pad() work on spectrogram inputs
SCREAMING_SNAKE_CASE : Any = self.feature_size
# convert into correct format for padding
if is_target:
SCREAMING_SNAKE_CASE : Any = [self._extract_mel_features(lowerCamelCase_ ) for waveform in speech]
SCREAMING_SNAKE_CASE : Optional[int] = BatchFeature({'''input_values''': features} )
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_mel_bins
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = BatchFeature({'''input_values''': speech} )
SCREAMING_SNAKE_CASE : Any = self.pad(
lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Any = feature_size_hack
# convert input values to correct format
SCREAMING_SNAKE_CASE : Union[str, Any] = padded_inputs['''input_values''']
if not isinstance(input_values[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(lowerCamelCase_ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
SCREAMING_SNAKE_CASE : List[str] = [array.astype(np.floataa ) for array in input_values]
elif isinstance(lowerCamelCase_ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : List[str] = input_values.astype(np.floataa )
# convert attention_mask to correct format
SCREAMING_SNAKE_CASE : Tuple = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(lowerCamelCase_ , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
SCREAMING_SNAKE_CASE : str = (
attention_mask
if self._get_padding_strategies(lowerCamelCase_ , max_length=lowerCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
SCREAMING_SNAKE_CASE : List[Any] = self.zero_mean_unit_var_norm(
padded_inputs['''input_values'''] , attention_mask=lowerCamelCase_ , padding_value=self.padding_value )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : Any = padded_inputs.convert_to_tensors(lowerCamelCase_ )
return padded_inputs
def __lowerCAmelCase ( self :Optional[int] ) -> Dict[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = super().to_dict()
# Don't serialize these as they are derived from the other properties.
SCREAMING_SNAKE_CASE : Any = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
for name in names:
if name in output:
del output[name]
return output
| 18 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :Optional[int] , lowerCamelCase_ :Union[str, Any]=5_02_67 , lowerCamelCase_ :int=50_00_00 , lowerCamelCase_ :Tuple=7_68 , lowerCamelCase_ :List[str]=2_56 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :Optional[Any]=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :List[str]=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Tuple=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Any=0 , lowerCamelCase_ :str=2 , **lowerCamelCase_ :List[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[str] = entity_vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = entity_emb_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
| 18 | 1 |
"""simple docstring"""
import os
def __A ( a_ : str = "input.txt" )-> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(a_ ) , a_ ) ) as input_file:
SCREAMING_SNAKE_CASE : Optional[Any] = [
[int(a_ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
SCREAMING_SNAKE_CASE : Any = len(a_ )
SCREAMING_SNAKE_CASE : Tuple = len(matrix[0] )
SCREAMING_SNAKE_CASE : List[str] = [[-1 for _ in range(a_ )] for _ in range(a_ )]
for i in range(a_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = matrix[i][0]
for j in range(1 , a_ ):
for i in range(a_ ):
SCREAMING_SNAKE_CASE : Optional[Any] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , a_ ):
SCREAMING_SNAKE_CASE : List[Any] = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
SCREAMING_SNAKE_CASE : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
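# Sanity check (illustrative): for the 5x5 example matrix from Project Euler
# problem 82, the minimal three-way (right/up/down) path sum is 994.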
if __name__ == "__main__":
print(f'''{solution() = }''')
| 18 |
"""simple docstring"""
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursive two-pointer linear search; returns the key's index or -1."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
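# Worked example: search([1, 2, 4, 8], 8) compares index 0 and index 3 on the
# first call and returns 3; a missing key returns -1 once left passes right.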
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """openai/whisper-base"""
UpperCamelCase = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
UpperCamelCase = """transcriber"""
UpperCamelCase = WhisperProcessor
UpperCamelCase = WhisperForConditionalGeneration
UpperCamelCase = ["""audio"""]
UpperCamelCase = ["""text"""]
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :List[Any] ) -> Dict:
'''simple docstring'''
return self.pre_processor(lowerCamelCase_ , return_tensors='''pt''' ).input_features
def __lowerCAmelCase ( self :int , lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
return self.model.generate(inputs=lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :List[Any] ) -> Any:
'''simple docstring'''
return self.pre_processor.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )[0]
| 18 |
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including num via the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError('''Input must be a positive integer''')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p starting from p * p as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
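# Worked example: prime_sieve_eratosthenes(10) returns [2, 3, 5, 7].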
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 18 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ : Union[str, Any] = {
"configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
"configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : int = ["MaskFormerFeatureExtractor"]
lowerCamelCase__ : Tuple = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = [
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
lowerCamelCase__ : int = [
"MaskFormerSwinBackbone",
"MaskFormerSwinModel",
"MaskFormerSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
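    # _LazyModule defers the heavy torch/vision imports above until an attribute
    # is first accessed, so importing this module stays cheap.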
| 18 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Optional[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch""", """scipy"""]
def __init__( self :Optional[Any] , *lowerCamelCase_ :Any , **lowerCamelCase_ :Any ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :str , **lowerCamelCase_ :Dict ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def __lowerCAmelCase ( cls :str , *lowerCamelCase_ :Dict , **lowerCamelCase_ :Tuple ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch''', '''scipy'''] )
| 18 |
"""simple docstring"""
import os
import sys
lowerCamelCase__ : List[Any] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase__ : str = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __A ( *a_ : Any , **a_ : Union[str, Any] )-> Dict:
'''simple docstring'''
return AutoConfig.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __A ( *a_ : str , **a_ : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModel.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> Dict:
'''simple docstring'''
return AutoModel.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __A ( *a_ : Any , **a_ : Tuple )-> Dict:
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __A ( *a_ : Dict , **a_ : Optional[Any] )-> Optional[int]:
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __A ( *a_ : Optional[int] , **a_ : str )-> Optional[int]:
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> List[Any]:
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*a_ , **a_ )
| 18 | 1 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """AutoTokenizer"""
UpperCamelCase = ["""tokenizer"""]
UpperCamelCase = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__( self :Union[str, Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :int=None ) -> Dict:
'''simple docstring'''
super().__init__(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = speaker_embeddings
@classmethod
def __lowerCAmelCase ( cls :List[Any] , lowerCamelCase_ :str , lowerCamelCase_ :Tuple="speaker_embeddings_path.json" , **lowerCamelCase_ :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
SCREAMING_SNAKE_CASE : int = get_file_from_repo(
lowerCamelCase_ , lowerCamelCase_ , subfolder=kwargs.pop('''subfolder''' , lowerCamelCase_ ) , cache_dir=kwargs.pop('''cache_dir''' , lowerCamelCase_ ) , force_download=kwargs.pop('''force_download''' , lowerCamelCase_ ) , proxies=kwargs.pop('''proxies''' , lowerCamelCase_ ) , resume_download=kwargs.pop('''resume_download''' , lowerCamelCase_ ) , local_files_only=kwargs.pop('''local_files_only''' , lowerCamelCase_ ) , use_auth_token=kwargs.pop('''use_auth_token''' , lowerCamelCase_ ) , revision=kwargs.pop('''revision''' , lowerCamelCase_ ) , )
if speaker_embeddings_path is None:
logger.warning(
f"`{os.path.join(lowerCamelCase_ , lowerCamelCase_ )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
SCREAMING_SNAKE_CASE : int = None
else:
with open(lowerCamelCase_ ) as speaker_embeddings_json:
SCREAMING_SNAKE_CASE : Dict = json.load(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
return cls(tokenizer=lowerCamelCase_ , speaker_embeddings=lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any]="speaker_embeddings_path.json" , lowerCamelCase_ :str="speaker_embeddings" , lowerCamelCase_ :bool = False , **lowerCamelCase_ :str , ) -> Optional[Any]:
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowerCamelCase_ , lowerCamelCase_ , '''v2''' ) , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = {}
SCREAMING_SNAKE_CASE : Any = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
SCREAMING_SNAKE_CASE : Union[str, Any] = self._load_voice_preset(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , lowerCamelCase_ , f"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : str = os.path.join(lowerCamelCase_ , f"{prompt_key}_{key}.npy" )
SCREAMING_SNAKE_CASE : Tuple = tmp_dict
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , '''w''' ) as fp:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
super().save_pretrained(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :str = None , **lowerCamelCase_ :Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.speaker_embeddings[voice_preset]
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
SCREAMING_SNAKE_CASE : Optional[int] = get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , lowerCamelCase_ ) , cache_dir=kwargs.pop('''cache_dir''' , lowerCamelCase_ ) , force_download=kwargs.pop('''force_download''' , lowerCamelCase_ ) , proxies=kwargs.pop('''proxies''' , lowerCamelCase_ ) , resume_download=kwargs.pop('''resume_download''' , lowerCamelCase_ ) , local_files_only=kwargs.pop('''local_files_only''' , lowerCamelCase_ ) , use_auth_token=kwargs.pop('''use_auth_token''' , lowerCamelCase_ ) , revision=kwargs.pop('''revision''' , lowerCamelCase_ ) , )
if path is None:
raise ValueError(
f"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.load(lowerCamelCase_ )
return voice_preset_dict
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Optional[dict] = None ) -> Optional[int]:
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self :Optional[int] , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :List[str]="pt" , lowerCamelCase_ :Union[str, Any]=2_56 , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :Optional[int]=True , lowerCamelCase_ :Tuple=False , **lowerCamelCase_ :Tuple , ) -> Union[str, Any]:
'''simple docstring'''
if voice_preset is not None and not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
if (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
SCREAMING_SNAKE_CASE : Optional[Any] = self._load_voice_preset(lowerCamelCase_ )
else:
if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not voice_preset.endswith('''.npz''' ):
SCREAMING_SNAKE_CASE : List[Any] = voice_preset + '''.npz'''
SCREAMING_SNAKE_CASE : Optional[Any] = np.load(lowerCamelCase_ )
if voice_preset is not None:
self._validate_voice_preset_dict(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(
lowerCamelCase_ , return_tensors=lowerCamelCase_ , padding='''max_length''' , max_length=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , )
if voice_preset is not None:
SCREAMING_SNAKE_CASE : Dict = voice_preset
return encoded_text
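

# ---------------------------------------------------------------------------
# Addendum (illustration, not part of the original file): a minimal usage
# sketch of the processor above, assuming it mirrors the upstream
# `transformers.BarkProcessor`. The checkpoint and voice-preset names are
# illustrative assumptions.
if __name__ == "__main__":
    from transformers import BarkProcessor

    processor = BarkProcessor.from_pretrained("suno/bark-small")
    # `voice_preset` may be a dict of prompt arrays, a path to an `.npz` file,
    # or a key into the preloaded speaker-embeddings dictionary.
    inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
    print(inputs.keys())  # tokenized text, plus the voice preset under "history_prompt"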
| 18 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """encodec"""
def __init__( self :List[str] , lowerCamelCase_ :Tuple=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , lowerCamelCase_ :str=2_40_00 , lowerCamelCase_ :Any=1 , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :Optional[int]=None , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :str=1_28 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :int=1 , lowerCamelCase_ :Dict=[8, 5, 4, 2] , lowerCamelCase_ :List[Any]="weight_norm" , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=7 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Optional[int]="reflect" , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Dict=1.0 , lowerCamelCase_ :Any=10_24 , lowerCamelCase_ :str=None , lowerCamelCase_ :Union[str, Any]=True , **lowerCamelCase_ :Optional[int] , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = target_bandwidths
SCREAMING_SNAKE_CASE : List[str] = sampling_rate
SCREAMING_SNAKE_CASE : Tuple = audio_channels
SCREAMING_SNAKE_CASE : Tuple = normalize
SCREAMING_SNAKE_CASE : str = chunk_length_s
SCREAMING_SNAKE_CASE : List[str] = overlap
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_filters
SCREAMING_SNAKE_CASE : Tuple = num_residual_layers
SCREAMING_SNAKE_CASE : List[Any] = upsampling_ratios
SCREAMING_SNAKE_CASE : Optional[int] = norm_type
SCREAMING_SNAKE_CASE : Any = kernel_size
SCREAMING_SNAKE_CASE : Union[str, Any] = last_kernel_size
SCREAMING_SNAKE_CASE : Tuple = residual_kernel_size
SCREAMING_SNAKE_CASE : Any = dilation_growth_rate
SCREAMING_SNAKE_CASE : Optional[int] = use_causal_conv
SCREAMING_SNAKE_CASE : str = pad_mode
SCREAMING_SNAKE_CASE : List[Any] = compress
SCREAMING_SNAKE_CASE : Optional[Any] = num_lstm_layers
SCREAMING_SNAKE_CASE : Dict = trim_right_ratio
SCREAMING_SNAKE_CASE : List[Any] = codebook_size
SCREAMING_SNAKE_CASE : Union[str, Any] = codebook_dim if codebook_dim is not None else hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`, got {self.norm_type}" )
super().__init__(**lowerCamelCase_ )
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __lowerCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
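

# ---------------------------------------------------------------------------
# Addendum (illustration, not part of the original file): a worked example of
# the derived quantities computed by the properties above, using the default
# 24 kHz configuration values.
if __name__ == "__main__":
    import math

    upsampling_ratios = [8, 5, 4, 2]
    sampling_rate = 24_000
    max_bandwidth = 24.0  # kbps; the last entry of the default `target_bandwidths`

    hop_length = math.prod(upsampling_ratios)           # 8 * 5 * 4 * 2 = 320 samples per frame
    frame_rate = math.ceil(sampling_rate / hop_length)  # ceil(24000 / 320) = 75 frames per second
    num_quantizers = int(1_000 * max_bandwidth // (frame_rate * 10))  # 24000 // 750 = 32 codebooks
    print(hop_length, frame_rate, num_quantizers)       # -> 320 75 32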
| 18 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
lowerCamelCase__ : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCamelCase__ : Optional[Any] = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : int = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
lowerCamelCase__ : List[Any] = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ElectraTokenizer
def __init__( self :Tuple , lowerCamelCase_ :Optional[int]=None , lowerCamelCase_ :int=None , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Tuple="[UNK]" , lowerCamelCase_ :List[Any]="[SEP]" , lowerCamelCase_ :Union[str, Any]="[PAD]" , lowerCamelCase_ :Tuple="[CLS]" , lowerCamelCase_ :Any="[MASK]" , lowerCamelCase_ :Optional[int]=True , lowerCamelCase_ :str=None , **lowerCamelCase_ :int , ) -> Dict:
'''simple docstring'''
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , tokenize_chinese_chars=lowerCamelCase_ , strip_accents=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCamelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCamelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCamelCase_ ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(lowerCamelCase_ , normalizer_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Dict = do_lower_case
SCREAMING_SNAKE_CASE : Dict = strip_accents
SCREAMING_SNAKE_CASE : Optional[Any] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE : int = normalizer_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = do_lower_case
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[int]=None ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
return output
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :List[int] , lowerCamelCase_ :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Optional[int] = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self._tokenizer.model.save(lowerCamelCase_ , name=lowerCamelCase_ )
return tuple(lowerCamelCase_ )
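

# ---------------------------------------------------------------------------
# Addendum (illustration, not part of the original file): a sketch of the
# BERT-style special-token layout implemented by the two methods above.
# Loading the checkpoint needs network access; the name comes from the maps
# defined at the top of this file.
if __name__ == "__main__":
    from transformers import ElectraTokenizerFast

    tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
    enc = tok("first sentence", "second sentence")
    # Single sequence:   [CLS] A [SEP]          -> token_type_ids are all 0
    # Pair of sequences: [CLS] A [SEP] B [SEP]  -> 0s for the A part, 1s for the B part
    print(tok.convert_ids_to_tokens(enc["input_ids"]))
    print(enc["token_type_ids"])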
| 18 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :str=7 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :int=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Optional[Any]=99 , lowerCamelCase_ :Any=36 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :str=4 , lowerCamelCase_ :Tuple=37 , lowerCamelCase_ :Optional[int]="gelu" , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :int=6 , lowerCamelCase_ :str=6 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :Tuple=10_00 , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : Tuple = text_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Dict = use_input_mask
SCREAMING_SNAKE_CASE : Any = use_token_type_ids
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = coordinate_size
SCREAMING_SNAKE_CASE : List[Any] = shape_size
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE : List[str] = scope
SCREAMING_SNAKE_CASE : Optional[int] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE : str = text_seq_length
SCREAMING_SNAKE_CASE : int = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE : Optional[Any] = self.text_seq_length + self.image_seq_length
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE : str = bbox[i, j, 3]
SCREAMING_SNAKE_CASE : List[str] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE : Any = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 2]
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = t
SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# text + image
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , pixel_values=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : int = LayoutLMvaForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
return True
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = LayoutLMvaModelTester(self )
SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[str] , lowerCamelCase_ :str=False ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(lowerCamelCase_ )
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCamelCase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCamelCase_ , )
return inputs_dict
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : str = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __A ( )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :str ) -> int:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase_ ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).pixel_values.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[1, 2]] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
SCREAMING_SNAKE_CASE : Tuple = model(
input_ids=input_ids.to(lowerCamelCase_ ) , bbox=bbox.to(lowerCamelCase_ ) , pixel_values=pixel_values.to(lowerCamelCase_ ) , )
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
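

# ---------------------------------------------------------------------------
# Addendum (illustration, not part of the original file): the element-wise
# swap loop in `prepare_config_and_inputs` above, which makes every bbox
# legal (x0 <= x1 and y0 <= y1), can be expressed in vectorized form.
# A minimal sketch, assuming bbox has shape (batch, seq_len, 4) holding
# (x0, y0, x1, y1) coordinates:
if __name__ == "__main__":
    import torch

    bbox = torch.randint(0, 1_000, (2, 5, 4))
    xa, ya, xb, yb = bbox.unbind(-1)
    legal_bbox = torch.stack(
        (torch.minimum(xa, xb), torch.minimum(ya, yb), torch.maximum(xa, xb), torch.maximum(ya, yb)),
        dim=-1,
    )
    assert (legal_bbox[..., 2] >= legal_bbox[..., 0]).all()
    assert (legal_bbox[..., 3] >= legal_bbox[..., 1]).all()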
| 18 | 1 |
"""simple docstring"""
import argparse
import os
import re
lowerCamelCase__ : List[str] = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCamelCase__ : str = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
lowerCamelCase__ : Union[str, Any] = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
def __A ( a_ : str , a_ : bool = False )-> Tuple:
'''simple docstring'''
with open(a_ , '''r''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : Tuple = f.read()
SCREAMING_SNAKE_CASE : Union[str, Any] = content.split('''\n''' )
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Optional[Any] = 0
while line_idx < len(a_ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
SCREAMING_SNAKE_CASE : Tuple = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
SCREAMING_SNAKE_CASE : Tuple = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
SCREAMING_SNAKE_CASE : Union[str, Any] = sorted(a_ , key=lambda a_ : _re_identifier.search(a_ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(a_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(a_ ) )
elif "\n".join(a_ ) != content:
return True
def __A ( a_ : bool = False )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [os.path.join(a_ , a_ ) for f in os.listdir(a_ ) if f.endswith('''.py''' )]
SCREAMING_SNAKE_CASE : List[Any] = [sort_auto_mapping(a_ , overwrite=a_ ) for fname in fnames]
if not overwrite and any(a_ ):
SCREAMING_SNAKE_CASE : Tuple = [f for f, d in zip(a_ , a_ ) if d]
raise ValueError(
F"The following files have auto mappings that need sorting: {', '.join(a_ )}. Run `make style` to fix"
''' this.''' )
if __name__ == "__main__":
lowerCamelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
lowerCamelCase__ : Any = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
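
# Addendum (illustration, not part of the original file): a worked example of
# the sort key used above. `_re_identifier` pulls the first quoted name out
# of a mapping entry, so a block such as `("albert", "AlbertConfig"),` sorts
# under "albert". Shown as a doctest to avoid changing the script's behavior:
#
#     >>> _re_identifier.search('        ("albert", "AlbertConfig"),').groups()[0]
#     'albert'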
| 18 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
lowerCamelCase__ : Any = logging.getLogger(__name__)
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
UpperCamelCase = field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
UpperCamelCase = field(
default=10_24 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the training data."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the validation data."""} )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the test data."""} )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
SCREAMING_SNAKE_CASE : Optional[int] = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
UpperCamelCase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def __A ( )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(a_ )
datasets.utils.logging.set_verbosity(a_ )
transformers.utils.logging.set_verbosity(a_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bit training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
SCREAMING_SNAKE_CASE : Any = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
SCREAMING_SNAKE_CASE : List[Any] = data_args.train_file.split('''.''' )[-1]
SCREAMING_SNAKE_CASE : Optional[int] = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
SCREAMING_SNAKE_CASE : str = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
SCREAMING_SNAKE_CASE : int = load_dataset('''csv''' , data_files=a_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
SCREAMING_SNAKE_CASE : Tuple = load_dataset('''json''' , data_files=a_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
SCREAMING_SNAKE_CASE : str = raw_datasets['''train'''].features['''label'''].names
SCREAMING_SNAKE_CASE : Union[str, Any] = len(a_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
SCREAMING_SNAKE_CASE : Dict = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=a_ , )
SCREAMING_SNAKE_CASE : List[Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
SCREAMING_SNAKE_CASE : Optional[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
SCREAMING_SNAKE_CASE : Tuple = {'''Refused''': 0, '''Entailed''': 1}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
SCREAMING_SNAKE_CASE : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(a_ : str ):
# Tokenize the texts
def _convert_table_text_to_pandas(a_ : List[Any] ):
SCREAMING_SNAKE_CASE : List[Any] = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
SCREAMING_SNAKE_CASE : Dict = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
SCREAMING_SNAKE_CASE : List[Any] = examples['''statement''']
SCREAMING_SNAKE_CASE : Optional[int] = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
SCREAMING_SNAKE_CASE : Any = tokenizer(a_ , a_ , padding=a_ , max_length=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : List[Any] = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = raw_datasets.map(
a_ , batched=a_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : Any = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
SCREAMING_SNAKE_CASE : Tuple = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
SCREAMING_SNAKE_CASE : Optional[int] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(a_ ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(a_ : EvalPrediction ):
SCREAMING_SNAKE_CASE : str = p.predictions[0] if isinstance(p.predictions , a_ ) else p.predictions
SCREAMING_SNAKE_CASE : Tuple = np.argmax(a_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = default_data_collator
elif training_args.fpaa:
SCREAMING_SNAKE_CASE : Union[str, Any] = DataCollatorWithPadding(a_ , pad_to_multiple_of=8 )
else:
SCREAMING_SNAKE_CASE : List[Any] = None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Optional[Any] = Trainer(
model=a_ , args=a_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=a_ , tokenizer=a_ , data_collator=a_ , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : List[str] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : str = last_checkpoint
SCREAMING_SNAKE_CASE : str = trainer.train(resume_from_checkpoint=a_ )
SCREAMING_SNAKE_CASE : Optional[int] = train_result.metrics
SCREAMING_SNAKE_CASE : int = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ )
)
SCREAMING_SNAKE_CASE : Optional[int] = min(a_ , len(a_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , a_ )
trainer.save_metrics('''train''' , a_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE : Tuple = trainer.evaluate(eval_dataset=a_ )
SCREAMING_SNAKE_CASE : str = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = min(a_ , len(a_ ) )
trainer.log_metrics('''eval''' , a_ )
trainer.save_metrics('''eval''' , a_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
SCREAMING_SNAKE_CASE : Optional[Any] = predict_dataset.remove_columns('''label''' )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.predict(a_ , metric_key_prefix='''predict''' ).predictions
SCREAMING_SNAKE_CASE : Union[str, Any] = np.argmax(a_ , axis=1 )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(a_ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(a_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = label_list[item]
writer.write(F"{index}\t{item}\n" )
SCREAMING_SNAKE_CASE : Optional[int] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**a_ )
else:
trainer.create_model_card(**a_ )
def __A ( a_ : List[str] )-> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
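
# Addendum (illustration, not part of the original file): a worked example of
# the `_convert_table_text_to_pandas` helper defined inside
# `preprocess_tabfact_function`. A TabFact `table_text` field is a
# '#'-separated grid with one row per line, e.g.
#
#     "year#city\n2008#beijing\n2012#london\n"
#
# which the helper turns into a pandas DataFrame with columns
# ["year", "city"] and two data rows; the tokenizer then receives that
# DataFrame together with the statement to classify.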
| 18 | 1 |
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __A ( a_ : Union[str, Any] , a_ : Any=() , a_ : str=None , a_ : Any="no" , a_ : List[str]="29500" )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = False
SCREAMING_SNAKE_CASE : Tuple = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
SCREAMING_SNAKE_CASE : Optional[Any] = True
elif "IPython" in sys.modules:
SCREAMING_SNAKE_CASE : int = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
SCREAMING_SNAKE_CASE : Tuple = PrecisionType(mixed_precision.lower() )
except ValueError:
        raise ValueError(
            F"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , a_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
SCREAMING_SNAKE_CASE : Optional[int] = 8
SCREAMING_SNAKE_CASE : Optional[Any] = PrepareForLaunch(a_ , distributed_type='''TPU''' )
print(F"Launching a training on {num_processes} TPU cores." )
xmp.spawn(a_ , args=a_ , nprocs=a_ , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*a_ )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
        world_size=a_ , master_addr='''127.0.0.1''' , master_port=a_ , mixed_precision=a_ ):
SCREAMING_SNAKE_CASE : Any = PrepareForLaunch(a_ , distributed_type='''MULTI_GPU''' )
print(F"Launching training on {num_processes} GPUs." )
try:
start_processes(a_ , args=a_ , nprocs=a_ , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
SCREAMING_SNAKE_CASE : Union[str, Any] = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*a_ )
def __A ( a_ : Optional[Any] , a_ : Tuple=() , a_ : int=2 )-> str:
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
        world_size=a_ , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
SCREAMING_SNAKE_CASE : Optional[Any] = PrepareForLaunch(a_ , debug=a_ )
start_processes(a_ , args=a_ , nprocs=a_ , start_method='''fork''' )
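
# ---------------------------------------------------------------------------
# Addendum (illustration, not part of the original file): a minimal usage
# sketch, assuming these helpers mirror `accelerate.notebook_launcher`:
#
#     from accelerate import notebook_launcher
#
#     def training_loop(mixed_precision="fp16"):
#         ...  # construct the Accelerator *inside* this function
#
#     # Spawn two GPU workers from a notebook cell:
#     notebook_launcher(training_loop, args=("fp16",), num_processes=2)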
| 18 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any=13 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :int=[1, 2, 1] , lowerCamelCase_ :str=[2, 2, 4] , lowerCamelCase_ :str=2 , lowerCamelCase_ :Tuple=2.0 , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :Optional[int]=0.0 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :str=False , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :List[Any]=1E-5 , lowerCamelCase_ :int=True , lowerCamelCase_ :str=None , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Union[str, Any]=10 , lowerCamelCase_ :List[Any]=8 , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Any = embed_dim
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : List[str] = num_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = window_size
SCREAMING_SNAKE_CASE : Optional[Any] = mlp_ratio
SCREAMING_SNAKE_CASE : List[Any] = qkv_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = drop_path_rate
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Any = patch_norm
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : List[Any] = scope
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_stride
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = SwinvaForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = SwinvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SwinvaConfig , embed_dim=37 )
    def test_config( self ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
    def test_multi_gpu_data_parallel_forward( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
    def test_model_common_attributes( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_attention_outputs( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''output_hidden_states'''] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths )
            self.assertEqual(len(attentions ) , expected_num_attentions )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , expected_num_attentions )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            out_len = len(outputs )
            # Check attention is always last and order is fine
            inputs_dict['''output_attentions'''] = True
            config.output_hidden_states = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            if hasattr(self.model_tester , '''num_hidden_states_types''' ):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states , len(outputs ) )
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions ) , expected_num_attentions )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        '''simple docstring'''
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
    def test_for_masked_image_modeling( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_initialization( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
    @slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
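# A minimal standalone sketch (added; not part of the original test module). It reuses the
# names already imported at the top of this file and mirrors the integration test above,
# assuming the checkpoint and the COCO fixture image are reachable from the working directory.
if __name__ == "__main__":
    processor = AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
    model = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    with torch.no_grad():
        logits = model(**processor(images=image , return_tensors='''pt''' ) ).logits
    print(logits.argmax(-1 ).item() )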
| 18 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config( model_name ):
    '''simple docstring'''
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 1_28
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('''Model not supported''' )
    repo_id = '''huggingface/label-files'''
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = '''speech-commands-v2-id2label.json'''
    else:
        config.num_labels = 5_27
        filename = '''audioset-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
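# Added note: e.g. get_audio_spectrogram_transformer_config("ast-finetuned-audioset-14-14-0.443")
# yields a config with time_stride=14, frequency_stride=14 and the 5_27 AudioSet labels attached.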
def rename_key( name ):
    '''simple docstring'''
    if "module.v" in name:
        name = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''embeddings.cls_token''' )
    if "dist_token" in name:
        name = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    # transformer blocks
    if "blocks" in name:
        name = name.replace('''blocks''' , '''encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
    if "module.mlp_head.1" in name:
        name = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
    return name
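# Added note: a sample end-to-end mapping through the replacements above:
#   "module.v.blocks.0.attn.proj.weight"
#     -> "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"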
def convert_state_dict( orig_state_dict , config ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[3] )
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
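# Added note: the fused qkv tensors in the original checkpoint stack query/key/value along dim 0,
# so a weight val has shape (3 * hidden_size, hidden_size); the slices [:dim], [dim : dim * 2]
# and [-dim:] above peel off the query, key and value projections for the Hugging Face layout.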
def remove_keys( state_dict ):
    '''simple docstring'''
    ignore_keys = [
        '''module.v.head.weight''',
        '''module.v.head.bias''',
        '''module.v.head_dist.weight''',
        '''module.v.head_dist.bias''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    config = get_audio_spectrogram_transformer_config(model_name )
    model_name_to_url = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
    # remove some keys
    remove_keys(state_dict )
    # rename some keys
    new_state_dict = convert_state_dict(state_dict , config )
    # load 🤗 model
    model = ASTForAudioClassification(config )
    model.eval()
    model.load_state_dict(new_state_dict )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.267_7393 if '''speech-commands''' not in model_name else -6.84_5978
    std = 4.568_9974 if '''speech-commands''' not in model_name else 5.565_4526
    max_length = 10_24 if '''speech-commands''' not in model_name else 1_28
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
    if "speech-commands" in model_name:
        dataset = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
        waveform = dataset[0]['''audio''']['''array''']
    else:
        filepath = hf_hub_download(
            repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
        waveform, _ = torchaudio.load(filepath )
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform , sampling_rate=1_60_00 , return_tensors='''pt''' )
    # forward pass
    outputs = model(**inputs )
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984] )
    else:
        raise ValueError('''Unknown model name''' )
    if not torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 ):
        raise ValueError('''Logits don\'t match''' )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"Saving feature extractor to {pytorch_dump_folder_path}" )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing model and feature extractor to the hub...''' )
        model.push_to_hub(F"MIT/{model_name}" )
        feature_extractor.push_to_hub(F"MIT/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
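# Example invocation (added; the script filename is assumed, not part of the original source):
#   python convert_audio_spectrogram_transformer_checkpoint.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted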
| 18 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
    model_type = """blenderbot-small"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=5_02_65 , max_position_embeddings=5_12 , encoder_layers=8 , encoder_ffn_dim=20_48 , encoder_attention_heads=16 , decoder_layers=8 , decoder_ffn_dim=20_48 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=5_12 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.0_2 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[f"past_key_values.{i}.key"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_inputs[f"past_key_values.{i}.value"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
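    # Example (added note): for task="default" without use_past, the property above resolves to
    #   {'input_ids': {0: 'batch', 1: 'encoder_sequence'},
    #    'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
    #    'decoder_input_ids': {0: 'batch', 1: 'decoder_sequence'},
    #    'decoder_attention_mask': {0: 'batch', 1: 'decoder_sequence'}}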
@property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[f"present.{i}.key"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_outputs[f"present.{i}.value"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seqaseq_lm( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , decoder_seq_length , is_pair , framework )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs , **decoder_inputs )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            batch, encoder_seq_length = common_inputs['''input_ids'''].shape
            decoder_seq_length = common_inputs['''decoder_input_ids'''].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['''decoder_attention_mask'''] = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(batch , decoder_past_length )] , dim=1 )
            common_inputs['''past_key_values'''] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers , num_decoder_layers )
            max_num_layers = max(num_encoder_layers , num_decoder_layers ) - min_num_layers
            remaining_side_name = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(min_num_layers , max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs
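    # Added note: each past_key_values entry above is a per-layer tuple of zero tensors shaped
    # (batch, num_heads, past_seq_len, hidden_size // num_heads), decoder self-attention states
    # first and encoder cross-attention states second; the decoder side uses
    # decoder_seq_length + 3 so the exported graph sees past states longer than the dummy input.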
    def _generate_dummy_inputs_for_causal_lm( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            batch, seqlen = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['''attention_mask'''].dtype
            common_inputs['''attention_mask'''] = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
            common_inputs['''past_key_values'''] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        return common_inputs
    def _flatten_past_key_values_( self , flattened_output , name , idx , t ):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast , self )._flatten_past_key_values_(
                flattened_output , name , idx , t )
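# Export sketch (added; upstream these classes are BlenderbotSmallConfig and
# BlenderbotSmallOnnxConfig). The transformers.onnx machinery consumed configs like the one
# above roughly as follows; model, tokenizer and output path are placeholders:
#
#   from pathlib import Path
#   from transformers.onnx import export
#   onnx_config = BlenderbotSmallOnnxConfig(model.config, task="seq2seq-lm")
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))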
| 18 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
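# Added note: every class below is a placeholder that is exported when torch is not installed;
# constructing one (or calling its classmethods) routes through requires_backends, which raises
# an ImportError naming the missing backend instead of letting users hit an opaque NameError
# deeper inside the library.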
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :List[str] , *lowerCamelCase_ :List[Any] , **lowerCamelCase_ :int ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Any , *lowerCamelCase_ :List[Any] , **lowerCamelCase_ :Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Any , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :List[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[int] , *lowerCamelCase_ :Any , **lowerCamelCase_ :Optional[int] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Dict , *lowerCamelCase_ :str , **lowerCamelCase_ :Dict ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :List[str] , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :List[Any] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Any ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[int] , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :Tuple ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :List[Any] , *lowerCamelCase_ :Any , **lowerCamelCase_ :List[str] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :List[str] , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :int ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :List[Any] , *lowerCamelCase_ :int , **lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Union[str, Any] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Dict ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[int] , *lowerCamelCase_ :int , **lowerCamelCase_ :Dict ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Any , *lowerCamelCase_ :int , **lowerCamelCase_ :List[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :str , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :str ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[int] , *lowerCamelCase_ :Dict , **lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :int , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :List[str] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :str , *lowerCamelCase_ :Any , **lowerCamelCase_ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Union[str, Any] , *lowerCamelCase_ :str , **lowerCamelCase_ :str ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Any , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Any ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :Optional[int] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Union[str, Any] , *lowerCamelCase_ :str , **lowerCamelCase_ :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :List[Any] , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :List[str] ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Union[str, Any] , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[Any] , *lowerCamelCase_ :List[Any] , **lowerCamelCase_ :str ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Tuple , *lowerCamelCase_ :str , **lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Union[str, Any] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Dict ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Any ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :int , *lowerCamelCase_ :Any , **lowerCamelCase_ :List[str] ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :List[str] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Tuple ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :str , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Dict ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
def __A ( *a_ : str , **a_ : int )-> int:
'''simple docstring'''
requires_backends(a_ , ['''torch'''] )
def __A ( *a_ : str , **a_ : Union[str, Any] )-> Optional[int]:
'''simple docstring'''
requires_backends(a_ , ['''torch'''] )
def __A ( *a_ : Tuple , **a_ : Any )-> Tuple:
'''simple docstring'''
requires_backends(a_ , ['''torch'''] )
def __A ( *a_ : Optional[Any] , **a_ : Optional[Any] )-> int:
'''simple docstring'''
requires_backends(a_ , ['''torch'''] )
def __A ( *a_ : Tuple , **a_ : Any )-> str:
'''simple docstring'''
requires_backends(a_ , ['''torch'''] )
def __A ( *a_ : Any , **a_ : str )-> Union[str, Any]:
'''simple docstring'''
requires_backends(a_ , ['''torch'''] )
def __A ( *a_ : List[Any] , **a_ : List[Any] )-> List[Any]:
'''simple docstring'''
requires_backends(a_ , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Dict , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Optional[int] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Any , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[int] , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :str ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :List[Any] , *lowerCamelCase_ :Dict , **lowerCamelCase_ :int ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :str , *lowerCamelCase_ :Optional[int] , **lowerCamelCase_ :List[str] ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Union[str, Any] , *lowerCamelCase_ :Dict , **lowerCamelCase_ :Optional[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Any , *lowerCamelCase_ :Optional[int] , **lowerCamelCase_ :List[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :int , **lowerCamelCase_ :int ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :int , **lowerCamelCase_ :List[str] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Union[str, Any] , *lowerCamelCase_ :int , **lowerCamelCase_ :Optional[int] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Union[str, Any] , *lowerCamelCase_ :List[Any] , **lowerCamelCase_ :str ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[int] , *lowerCamelCase_ :str , **lowerCamelCase_ :List[Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Optional[Any] , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :List[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[Any] , *lowerCamelCase_ :Optional[int] , **lowerCamelCase_ :Dict ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[int] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Optional[int] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Optional[Any] , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Dict , *lowerCamelCase_ :int , **lowerCamelCase_ :Tuple ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :str , *lowerCamelCase_ :Any , **lowerCamelCase_ :str ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :List[str] , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Any , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :Union[str, Any] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :str , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :int ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Dict , *lowerCamelCase_ :Dict , **lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[Any] , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :str ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Any , *lowerCamelCase_ :int , **lowerCamelCase_ :Tuple ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Optional[Any] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Union[str, Any] ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :List[Any] , **lowerCamelCase_ :int ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :Any , **lowerCamelCase_ :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Dict , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[Any] , *lowerCamelCase_ :List[Any] , **lowerCamelCase_ :Dict ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[Any] , *lowerCamelCase_ :int , **lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Optional[int] , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[int] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :str ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :Optional[int] , **lowerCamelCase_ :List[str] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :List[Any] , *lowerCamelCase_ :Any , **lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Any , *lowerCamelCase_ :List[Any] , **lowerCamelCase_ :str ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :List[str] , *lowerCamelCase_ :Dict , **lowerCamelCase_ :str ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :str , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :List[Any] ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Dict , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Any , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :int ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Dict , *lowerCamelCase_ :Any , **lowerCamelCase_ :str ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :List[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Union[str, Any] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Any ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Dict , *lowerCamelCase_ :Dict , **lowerCamelCase_ :List[Any] ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :str , *lowerCamelCase_ :str , **lowerCamelCase_ :int ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :str , *lowerCamelCase_ :Dict , **lowerCamelCase_ :int ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Optional[Any] , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :Any ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[Any] , *lowerCamelCase_ :str , **lowerCamelCase_ :int ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :str , *lowerCamelCase_ :List[Any] , **lowerCamelCase_ :Dict ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Optional[int] , *lowerCamelCase_ :Dict , **lowerCamelCase_ :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[int] , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :int ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :str ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Union[str, Any] , *lowerCamelCase_ :Dict , **lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[int] , *lowerCamelCase_ :List[Any] , **lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :List[str] , *lowerCamelCase_ :Any , **lowerCamelCase_ :str ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :str , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :List[Any] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :int , *lowerCamelCase_ :str , **lowerCamelCase_ :Dict ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[Any] , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :Any ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Optional[int] , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :Dict ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[int] , *lowerCamelCase_ :int , **lowerCamelCase_ :str ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[Any] , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Dict , *lowerCamelCase_ :List[Any] , **lowerCamelCase_ :List[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :str , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Dict ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :List[Any] , *lowerCamelCase_ :Dict , **lowerCamelCase_ :List[str] ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Tuple , *lowerCamelCase_ :Any , **lowerCamelCase_ :Optional[int] ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :List[Any] , *lowerCamelCase_ :Any , **lowerCamelCase_ :str ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :str , *lowerCamelCase_ :str , **lowerCamelCase_ :Dict ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Tuple , *lowerCamelCase_ :Any , **lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :List[Any] , *lowerCamelCase_ :int , **lowerCamelCase_ :Tuple ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Dict , *lowerCamelCase_ :Dict , **lowerCamelCase_ :Optional[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :List[Any] , *lowerCamelCase_ :str , **lowerCamelCase_ :Union[str, Any] ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :List[Any] , **lowerCamelCase_ :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Dict , *lowerCamelCase_ :Dict , **lowerCamelCase_ :int ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :List[Any] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Tuple ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Dict , *lowerCamelCase_ :Any , **lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[int] , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :Optional[int] ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Union[str, Any] , *lowerCamelCase_ :Dict , **lowerCamelCase_ :List[Any] ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :Any , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Any , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :Any ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Union[str, Any] , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :str ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :List[Any] , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :List[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Union[str, Any] , *lowerCamelCase_ :int , **lowerCamelCase_ :Dict ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Optional[int] , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :str ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :Optional[int] , **lowerCamelCase_ :List[Any] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Union[str, Any] , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :Dict ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :str , *lowerCamelCase_ :Dict , **lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :List[str] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :List[str] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :List[str] , *lowerCamelCase_ :Optional[int] , **lowerCamelCase_ :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Union[str, Any] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :int , *lowerCamelCase_ :Optional[int] , **lowerCamelCase_ :Union[str, Any] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :List[Any] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Optional[int] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Any ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Any , *lowerCamelCase_ :Optional[int] , **lowerCamelCase_ :Union[str, Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :Optional[int] , **lowerCamelCase_ :List[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Union[str, Any] , *lowerCamelCase_ :str , **lowerCamelCase_ :Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Union[str, Any] , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :Tuple ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[Any] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Tuple ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Dict , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Any ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :List[Any] , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :List[Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Union[str, Any] , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :Optional[Any] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :List[str] , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :Tuple ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Dict , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Tuple ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :List[str] , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :int ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Optional[int] , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :List[str] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :int , *lowerCamelCase_ :Dict , **lowerCamelCase_ :Optional[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :str , *lowerCamelCase_ :int , **lowerCamelCase_ :List[str] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Dict , *lowerCamelCase_ :Optional[int] , **lowerCamelCase_ :Any ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :str , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :List[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[int] , *lowerCamelCase_ :List[Any] , **lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :int , *lowerCamelCase_ :Optional[int] , **lowerCamelCase_ :Dict ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Dict , *lowerCamelCase_ :int , **lowerCamelCase_ :str ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Tuple , *lowerCamelCase_ :str , **lowerCamelCase_ :List[str] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Optional[Any] , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :Dict ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Optional[Any] , *lowerCamelCase_ :Optional[int] , **lowerCamelCase_ :Dict ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :int , *lowerCamelCase_ :List[Any] , **lowerCamelCase_ :str ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""torch"""]
def __init__( self :Optional[Any] , *lowerCamelCase_ :Dict , **lowerCamelCase_ :Optional[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :Any , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :Any ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls :int , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Optional[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
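# The classes above all follow the same "dummy object" pattern: importing a
# torch-only class succeeds even when torch is absent, and any actual use fails
# with a clear error. A minimal hedged sketch of the underlying helpers -- an
# illustrative reimplementation, not the library source:
import importlib.util
def requires_backends_sketch(obj, backends):
    # Raise a helpful ImportError listing whichever backends are missing.
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backends: {missing}")
class DummyObjectSketch(type):
    # Metaclass so that even attribute access on the class itself fails loudly.
    def __getattr__(cls, key):
        if key.startswith("_"):
            raise AttributeError(key)
        requires_backends_sketch(cls, cls._backends)
class SomeTorchOnlyModel(metaclass=DummyObjectSketch):
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        requires_backends_sketch(self, self._backends)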
| 18 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """layoutlmv3"""
def __init__( self :str , lowerCamelCase_ :Optional[Any]=5_02_65 , lowerCamelCase_ :Dict=7_68 , lowerCamelCase_ :Union[str, Any]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Union[str, Any]=30_72 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Any=5_12 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=1E-5 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :int=0 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :List[str]=10_24 , lowerCamelCase_ :Tuple=1_28 , lowerCamelCase_ :Any=1_28 , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :str=32 , lowerCamelCase_ :int=1_28 , lowerCamelCase_ :int=64 , lowerCamelCase_ :List[Any]=2_56 , lowerCamelCase_ :Any=True , lowerCamelCase_ :str=True , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :List[str]=2_24 , lowerCamelCase_ :Dict=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :Any=None , **lowerCamelCase_ :Optional[Any] , ) -> int:
'''simple docstring'''
super().__init__(
vocab_size=lowerCamelCase_ , hidden_size=lowerCamelCase_ , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , intermediate_size=lowerCamelCase_ , hidden_act=lowerCamelCase_ , hidden_dropout_prob=lowerCamelCase_ , attention_probs_dropout_prob=lowerCamelCase_ , max_position_embeddings=lowerCamelCase_ , type_vocab_size=lowerCamelCase_ , initializer_range=lowerCamelCase_ , layer_norm_eps=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : Tuple = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : int = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Dict = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Optional[int] = text_embed
SCREAMING_SNAKE_CASE : Any = visual_embed
SCREAMING_SNAKE_CASE : Any = input_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : str = classifier_dropout
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.12""" )
@property
def __lowerCAmelCase ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __lowerCAmelCase ( self :Optional[int] ) -> float:
'''simple docstring'''
return 1E-5
@property
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
return 12
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :"ProcessorMixin" , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional["TensorType"] = None , lowerCamelCase_ :int = 3 , lowerCamelCase_ :int = 40 , lowerCamelCase_ :int = 40 , ) -> Mapping[str, Any]:
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Dict = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : int = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = dict(
processor(
lowerCamelCase_ , text=lowerCamelCase_ , boxes=lowerCamelCase_ , return_tensors=lowerCamelCase_ , ) )
return inputs
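# Hedged usage sketch for the ONNX config above: during export,
# generate_dummy_inputs() is handed a LayoutLMv3 processor so it can fabricate
# words, boxes and pixel values of fixed shape. Kept as comments because the
# class and method names in this file are obfuscated; the names below are the
# presumed originals and the checkpoint is an illustrative assumption:
#   from transformers import LayoutLMv3Config, LayoutLMv3Processor
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="sequence-classification")
#   dummy = onnx_config.generate_dummy_inputs(processor, batch_size=-1, seq_length=-1)
#   # dummy now holds input_ids, attention_mask, bbox and pixel_values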
| 18 | 1 |
"""simple docstring"""
import random
def __A ( a_ : int )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = num - 1
SCREAMING_SNAKE_CASE : str = 0
while s % 2 == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = s // 2
t += 1
for _ in range(5 ):
SCREAMING_SNAKE_CASE : Any = random.randrange(2 , num - 1 )
SCREAMING_SNAKE_CASE : Optional[int] = pow(a_ , a_ , a_ )
if v != 1:
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
SCREAMING_SNAKE_CASE : int = i + 1
SCREAMING_SNAKE_CASE : int = (v**2) % num
return True
def __A ( a_ : int )-> bool:
'''simple docstring'''
if num < 2:
return False
SCREAMING_SNAKE_CASE : List[str] = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(a_ )
def __A ( a_ : int = 10_24 )-> int:
'''simple docstring'''
while True:
SCREAMING_SNAKE_CASE : Tuple = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(a_ ):
return num
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
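# A few hedged sanity checks for the helpers above (using their presumed
# original names, since the obfuscated definitions all share the name __A):
#   is_prime_low_num(97)  -> True   (hit in the low_primes table)
#   is_prime_low_num(561) -> False  (Carmichael number, 561 = 3 * 11 * 17,
#                                    caught by trial division over low_primes)
#   rabin_miller(7919)    -> True   (Miller-Rabin never rejects a true prime;
#                                    the 5 random rounds only risk accepting a
#                                    composite)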
| 18 |
"""simple docstring"""
import math
def __A ( a_ : list , a_ : int )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = len(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = int(math.floor(math.sqrt(a_ ) ) )
SCREAMING_SNAKE_CASE : List[str] = 0
while arr[min(a_ , a_ ) - 1] < x:
SCREAMING_SNAKE_CASE : Optional[Any] = step
step += int(math.floor(math.sqrt(a_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
SCREAMING_SNAKE_CASE : Any = prev + 1
if prev == min(a_ , a_ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ : List[str] = [int(item) for item in user_input.split(",")]
lowerCamelCase__ : Dict = int(input("Enter the number to be searched:\n"))
lowerCamelCase__ : Tuple = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f'''Number {x} is at index {res}''')
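# The obfuscated body above has lost its local names (note the degenerate
# min(a_, a_) calls). A minimal un-obfuscated sketch of the same jump-search
# algorithm, assuming the original locals were `step` and `prev` and a sorted,
# non-empty input:
import math
def jump_search_sketch(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # Jump ahead in sqrt(n)-sized blocks until the block end reaches or passes x.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # Linear scan inside the single block that may contain x.
    while arr[prev] < x:
        prev += 1
        if prev == min(step, n):
            return -1
    return prev if arr[prev] == x else -1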
| 18 | 1 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""pixel_values"""]
def __init__( self :Dict , lowerCamelCase_ :bool = True , lowerCamelCase_ :Dict[str, int] = None , lowerCamelCase_ :PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase_ :bool = True , lowerCamelCase_ :Dict[str, int] = None , lowerCamelCase_ :bool = True , lowerCamelCase_ :Union[int, float] = 1 / 2_55 , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , lowerCamelCase_ :Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **lowerCamelCase_ :Tuple , ) -> None:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = size if size is not None else {'''shortest_edge''': 2_24}
SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
SCREAMING_SNAKE_CASE : Dict = get_size_dict(lowerCamelCase_ , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE : List[str] = do_resize
SCREAMING_SNAKE_CASE : Tuple = size
SCREAMING_SNAKE_CASE : Optional[Any] = resample
SCREAMING_SNAKE_CASE : Any = do_center_crop
SCREAMING_SNAKE_CASE : List[str] = crop_size
SCREAMING_SNAKE_CASE : int = do_rescale
SCREAMING_SNAKE_CASE : Any = rescale_factor
SCREAMING_SNAKE_CASE : str = do_normalize
SCREAMING_SNAKE_CASE : List[str] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :np.ndarray , lowerCamelCase_ :Dict[str, int] , lowerCamelCase_ :PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase_ :Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ :Any , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE : str = int((2_56 / 2_24) * size['''shortest_edge'''] )
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(lowerCamelCase_ , size=lowerCamelCase_ , default_to_square=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
lowerCamelCase_ , size=(size_dict['''height'''], size_dict['''width''']) , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :np.ndarray , lowerCamelCase_ :Dict[str, int] , lowerCamelCase_ :Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ :int , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(lowerCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :np.ndarray , lowerCamelCase_ :Union[int, float] , lowerCamelCase_ :Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ :Any , ) -> np.ndarray:
'''simple docstring'''
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :np.ndarray , lowerCamelCase_ :Union[float, List[float]] , lowerCamelCase_ :Union[float, List[float]] , lowerCamelCase_ :Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ :str , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :ImageInput , lowerCamelCase_ :Optional[bool] = None , lowerCamelCase_ :Optional[Dict[str, int]] = None , lowerCamelCase_ :PILImageResampling = None , lowerCamelCase_ :Optional[bool] = None , lowerCamelCase_ :Optional[Dict[str, int]] = None , lowerCamelCase_ :Optional[bool] = None , lowerCamelCase_ :Optional[float] = None , lowerCamelCase_ :Optional[bool] = None , lowerCamelCase_ :Optional[Union[float, Iterable[float]]] = None , lowerCamelCase_ :Optional[Union[float, Iterable[float]]] = None , lowerCamelCase_ :Optional[TensorType] = None , lowerCamelCase_ :ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase_ :Optional[int] , ) -> BatchFeature:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : Dict = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : Dict = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[str] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(lowerCamelCase_ , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE : str = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : List[Any] = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.resize(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.center_crop(lowerCamelCase_ , lowerCamelCase_ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Tuple = [self.rescale(lowerCamelCase_ , lowerCamelCase_ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Dict = [self.normalize(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for image in images]
SCREAMING_SNAKE_CASE : List[str] = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ ) for image in images]
SCREAMING_SNAKE_CASE : Any = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
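# Hedged usage sketch (comments only, since the class name is obfuscated; the
# defaults above mirror a LeViT-style resize + center-crop pipeline):
#   from PIL import Image
#   processor = lowercase__()   # shortest_edge=224 resize, 224x224 center crop
#   batch = processor(images=Image.open("example.jpg"), return_tensors="np")
#   batch["pixel_values"].shape   # -> (1, 3, 224, 224)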
| 18 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowerCamelCase__ : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCamelCase__ : str = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowerCamelCase__ : int = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Union[str, Any]="binary" , lowerCamelCase_ :Dict=None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = fa_score(
lowerCamelCase_ , lowerCamelCase_ , labels=lowerCamelCase_ , pos_label=lowerCamelCase_ , average=lowerCamelCase_ , sample_weight=lowerCamelCase_ )
return {"f1": float(lowerCamelCase_ ) if score.size == 1 else score}
| 18 | 1 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ : Dict = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PegasusTokenizer
UpperCamelCase = PegasusTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __lowerCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        SCREAMING_SNAKE_CASE : Optional[int] = PegasusTokenizer(lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCAmelCase ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def __lowerCAmelCase ( self :int , **lowerCamelCase_ :Tuple ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return ("This is a test", "This is a test")
def __lowerCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = '''</s>'''
SCREAMING_SNAKE_CASE : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(lowerCamelCase_ ) , 11_03 )
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
def __lowerCAmelCase ( self :Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Optional[int] = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = py_tokenizer([raw_input_str] , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids[0]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
SCREAMING_SNAKE_CASE : int = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
SCREAMING_SNAKE_CASE : List[str] = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
SCREAMING_SNAKE_CASE : Tuple = tokenizer([raw_input_str] , return_tensors=lowerCamelCase_ ).input_ids[0]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
SCREAMING_SNAKE_CASE : Tuple = '''To ensure a smooth flow of bank resolutions.'''
SCREAMING_SNAKE_CASE : str = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
SCREAMING_SNAKE_CASE : str = tokenizer([raw_input_str] , return_tensors=lowerCamelCase_ ).input_ids[0]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ['''This is going to be way too long.''' * 1_50, '''short example''']
SCREAMING_SNAKE_CASE : List[Any] = ['''not super long but more than 5 tokens''', '''tiny''']
SCREAMING_SNAKE_CASE : str = self._large_tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Tuple = self._large_tokenizer(
text_target=lowerCamelCase_ , max_length=5 , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase_ ) == 2 # input_ids, attention_mask.
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PegasusTokenizer
UpperCamelCase = PegasusTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        SCREAMING_SNAKE_CASE : Optional[int] = PegasusTokenizer(lowerCamelCase__ , offset=0 , mask_token_sent=None , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCAmelCase ( self :Dict ) -> Optional[int]:
'''simple docstring'''
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def __lowerCAmelCase ( self :Any , **lowerCamelCase_ :int ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :str ) -> Tuple:
'''simple docstring'''
return ("This is a test", "This is a test")
def __lowerCAmelCase ( self :Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Dict = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
SCREAMING_SNAKE_CASE : Any = rust_tokenizer([raw_input_str] , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = py_tokenizer([raw_input_str] , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids[0]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@require_torch
def __lowerCAmelCase ( self :List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = ['''This is going to be way too long.''' * 10_00, '''short example''']
SCREAMING_SNAKE_CASE : List[str] = ['''not super long but more than 5 tokens''', '''tiny''']
SCREAMING_SNAKE_CASE : List[str] = self._large_tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : str = self._large_tokenizer(
text_target=lowerCamelCase_ , max_length=5 , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCamelCase_ ) == 2 # input_ids, attention_mask.
def __lowerCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
SCREAMING_SNAKE_CASE : List[str] = self._large_tokenizer(lowerCamelCase_ ).input_ids
self.assertListEqual(
lowerCamelCase_ , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
| 18 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def __A ( a_ : int , a_ : int )-> bool:
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
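# Worked example: 49/98 passes this check -- the shared digit 9 "cancels", since
# 49 % 10 == 9 == 98 // 10 and (49 // 10) / (98 % 10) = 4 / 8 = 0.5 = 49 / 98.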
def __A ( a_ : int )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : List[str] = 11
    SCREAMING_SNAKE_CASE : Union[str, Any] = int('''1''' + '''0''' * a_ )
for num in range(a_ , a_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(a_ , a_ ):
solutions.append(F"{num}/{den}" )
den += 1
num += 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10
return solutions
def __A ( a_ : int = 2 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 1.0
for fraction in fraction_list(a_ ):
SCREAMING_SNAKE_CASE : List[str] = Fraction(a_ )
result *= frac.denominator / frac.numerator
return int(a_ )
if __name__ == "__main__":
print(solution())
| 18 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : Any = logging.get_logger(__name__)
def __A ( a_ : str , a_ : Dict=False )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
SCREAMING_SNAKE_CASE : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def __A ( a_ : Dict , a_ : Optional[Any] , a_ : Tuple=False )-> Optional[int]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE : Tuple = ''''''
else:
SCREAMING_SNAKE_CASE : int = '''deit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : Dict = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : str = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE : Dict = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE : Any = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[-config.hidden_size :]
def __A ( a_ : Optional[int] , a_ : List[str] , a_ : List[str] )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = dct.pop(a_ )
SCREAMING_SNAKE_CASE : str = val
def __A ( )-> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : str = Image.open(requests.get(a_ , stream=a_ ).raw )
return im
@torch.no_grad()
def __A ( a_ : Tuple , a_ : int )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = DeiTConfig()
# all deit models have fine-tuned heads
SCREAMING_SNAKE_CASE : Tuple = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
SCREAMING_SNAKE_CASE : List[Any] = 10_00
SCREAMING_SNAKE_CASE : Tuple = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Optional[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : Dict = {int(a_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = idalabel
SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : List[str] = int(deit_name[-6:-4] )
SCREAMING_SNAKE_CASE : Optional[Any] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('''tiny''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = 1_92
SCREAMING_SNAKE_CASE : Optional[Any] = 7_68
SCREAMING_SNAKE_CASE : Optional[Any] = 12
SCREAMING_SNAKE_CASE : Dict = 3
elif deit_name[9:].startswith('''small''' ):
SCREAMING_SNAKE_CASE : List[str] = 3_84
SCREAMING_SNAKE_CASE : Tuple = 15_36
SCREAMING_SNAKE_CASE : List[str] = 12
SCREAMING_SNAKE_CASE : Optional[Any] = 6
if deit_name[9:].startswith('''base''' ):
pass
elif deit_name[4:].startswith('''large''' ):
SCREAMING_SNAKE_CASE : int = 10_24
SCREAMING_SNAKE_CASE : List[Any] = 40_96
SCREAMING_SNAKE_CASE : Optional[int] = 24
SCREAMING_SNAKE_CASE : Optional[int] = 16
# load original model from timm
SCREAMING_SNAKE_CASE : Any = timm.create_model(a_ , pretrained=a_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE : str = timm_model.state_dict()
SCREAMING_SNAKE_CASE : Optional[Any] = create_rename_keys(a_ , a_ )
for src, dest in rename_keys:
rename_key(a_ , a_ , a_ )
read_in_q_k_v(a_ , a_ , a_ )
# load HuggingFace model
SCREAMING_SNAKE_CASE : int = DeiTForImageClassificationWithTeacher(a_ ).eval()
model.load_state_dict(a_ )
# Check outputs on an image, prepared by DeiTImageProcessor
SCREAMING_SNAKE_CASE : List[str] = int(
(2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
SCREAMING_SNAKE_CASE : int = DeiTImageProcessor(size=a_ , crop_size=config.image_size )
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=prepare_img() , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Tuple = encoding['''pixel_values''']
SCREAMING_SNAKE_CASE : str = model(a_ )
SCREAMING_SNAKE_CASE : Dict = timm_model(a_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a_ , outputs.logits , atol=1E-3 )
Path(a_ ).mkdir(exist_ok=a_ )
print(F"Saving model {deit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(a_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCamelCase__ : Optional[Any] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
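# Hedged CLI usage (the script filename and the output path are placeholders;
# the two flags are the ones registered with argparse above):
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224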
| 18 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : int = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """maskformer-swin"""
UpperCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase_ :List[Any]=2_24 , lowerCamelCase_ :Tuple=4 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :List[str]=96 , lowerCamelCase_ :int=[2, 2, 6, 2] , lowerCamelCase_ :Union[str, Any]=[3, 6, 12, 24] , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=4.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=0.0 , lowerCamelCase_ :Any=0.0 , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :Any=1E-5 , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :List[str]=None , **lowerCamelCase_ :Union[str, Any] , ) -> Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim
SCREAMING_SNAKE_CASE : List[Any] = depths
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = num_heads
SCREAMING_SNAKE_CASE : Any = window_size
SCREAMING_SNAKE_CASE : List[str] = mlp_ratio
SCREAMING_SNAKE_CASE : str = qkv_bias
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = drop_path_rate
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Any = use_absolute_embeddings
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : int = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
SCREAMING_SNAKE_CASE : Dict = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
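# Hedged example: the defaults mirror a Swin-Tiny style backbone, so
# hidden_size works out to embed_dim * 2 ** (len(depths) - 1) = 96 * 8 = 768:
#   cfg = lowercase__()   # class name as obfuscated above
#   cfg.hidden_size       # -> 768
#   cfg.stage_names       # -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']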
| 18 | 1 |
"""simple docstring"""
from __future__ import annotations
def __A ( a_ : list )-> float:
'''simple docstring'''
    if not a_:
raise ValueError('''List is empty''' )
return sum(a_ ) / len(a_ )
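# e.g. (under the presumed original name find_mean, defined as __A above):
#   find_mean([3, 6, 9]) -> 6.0
#   find_mean([])        -> raises ValueError('List is empty')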
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
"""simple docstring"""
import math
class lowercase__:
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCamelCase_ :List[str]=0 ) -> List[Any]: # a graph with Node 0,1,...,N-1
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = n
SCREAMING_SNAKE_CASE : List[Any] = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # adjacency matrix for weight
SCREAMING_SNAKE_CASE : Any = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # dp[i][j] stores minimum distance from i to j
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = w
def __lowerCAmelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
SCREAMING_SNAKE_CASE : List[str] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return self.dp[u][v]
if __name__ == "__main__":
lowerCamelCase__ : Dict = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
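    # Expected results, worked out by hand on the edges above (assuming the
    # intended semantics where add_edge stores dp[u][v] = w before relaxation):
    #   show_min(1, 4): 1 -> 3 (5) -> 4 (6), total 11
    #   show_min(0, 3): 0 -> 2 (9) -> 3 (7), total 16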
| 18 | 1 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """data2vec-audio"""
def __init__( self :Optional[Any] , lowerCamelCase_ :Optional[int]=32 , lowerCamelCase_ :Tuple=7_68 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Optional[Any]=30_72 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :int=0.1 , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :Optional[Any]=0.1 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :Tuple=0.1 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :int=1E-5 , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :str=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCamelCase_ :List[str]=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase_ :str=(10, 3, 3, 3, 3, 2, 2) , lowerCamelCase_ :int=False , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :Optional[Any]=19 , lowerCamelCase_ :Dict=5 , lowerCamelCase_ :Any=0.0_5 , lowerCamelCase_ :str=10 , lowerCamelCase_ :Optional[Any]=2 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :int=10 , lowerCamelCase_ :int=0 , lowerCamelCase_ :List[Any]="sum" , lowerCamelCase_ :int=False , lowerCamelCase_ :Tuple=False , lowerCamelCase_ :Tuple=2_56 , lowerCamelCase_ :Union[str, Any]=(5_12, 5_12, 5_12, 5_12, 15_00) , lowerCamelCase_ :Optional[Any]=(5, 3, 3, 1, 1) , lowerCamelCase_ :Tuple=(1, 2, 3, 1, 1) , lowerCamelCase_ :Union[str, Any]=5_12 , lowerCamelCase_ :Any=0 , lowerCamelCase_ :Any=1 , lowerCamelCase_ :List[Any]=2 , lowerCamelCase_ :Union[str, Any]=False , lowerCamelCase_ :Dict=3 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :Optional[int]=3 , lowerCamelCase_ :Dict=None , **lowerCamelCase_ :Optional[int] , ) -> List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = feat_extract_activation
SCREAMING_SNAKE_CASE : int = list(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = list(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = list(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = conv_bias
SCREAMING_SNAKE_CASE : str = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE : Optional[int] = conv_pos_kernel_size
SCREAMING_SNAKE_CASE : Optional[int] = len(self.conv_dim )
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout
SCREAMING_SNAKE_CASE : List[str] = attention_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = activation_dropout
SCREAMING_SNAKE_CASE : str = feat_proj_dropout
SCREAMING_SNAKE_CASE : int = final_dropout
SCREAMING_SNAKE_CASE : Dict = layerdrop
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : List[str] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE : Optional[int] = mask_time_prob
SCREAMING_SNAKE_CASE : str = mask_time_length
SCREAMING_SNAKE_CASE : Tuple = mask_time_min_masks
SCREAMING_SNAKE_CASE : str = mask_feature_prob
SCREAMING_SNAKE_CASE : List[str] = mask_feature_length
SCREAMING_SNAKE_CASE : Any = mask_feature_min_masks
# ctc loss
SCREAMING_SNAKE_CASE : Any = ctc_loss_reduction
SCREAMING_SNAKE_CASE : Any = ctc_zero_infinity
# adapter
SCREAMING_SNAKE_CASE : Any = add_adapter
SCREAMING_SNAKE_CASE : List[Any] = adapter_kernel_size
SCREAMING_SNAKE_CASE : str = adapter_stride
SCREAMING_SNAKE_CASE : List[str] = num_adapter_layers
SCREAMING_SNAKE_CASE : Any = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE : List[Any] = list(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = list(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = list(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = xvector_output_dim
@property
def __lowerCAmelCase ( self :List[str] ) -> int:
'''simple docstring'''
return math.prod(self.conv_stride )
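# A self-contained check of the ratio returned by the property above: with the
# default conv_stride (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples by
# 5 * 2**6 = 320, i.e. one encoder frame per 320 input samples (about 50 frames
# per second of 16 kHz audio).
if __name__ == "__main__":
    import math
    assert math.prod((5, 2, 2, 2, 2, 2, 2)) == 320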
| 18 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Tuple = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 | 1 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __A ( a_ : List[str] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Union[str, Any] , a_ : List[str] )-> Any:
'''simple docstring'''
with open(a_ ) as metadata_file:
SCREAMING_SNAKE_CASE : List[str] = json.load(a_ )
SCREAMING_SNAKE_CASE : Dict = LukeConfig(use_entity_aware_attention=a_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE : List[str] = torch.load(a_ , map_location='''cpu''' )['''module''']
# Load the entity vocab file
SCREAMING_SNAKE_CASE : int = load_original_entity_vocab(a_ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE : Optional[Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE : Tuple = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE : Any = AddedToken('''<ent>''' , lstrip=a_ , rstrip=a_ )
SCREAMING_SNAKE_CASE : Any = AddedToken('''<ent2>''' , lstrip=a_ , rstrip=a_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(a_ )
with open(os.path.join(a_ , '''tokenizer_config.json''' ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE : Any = json.load(a_ )
SCREAMING_SNAKE_CASE : Dict = '''MLukeTokenizer'''
with open(os.path.join(a_ , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(a_ , a_ )
with open(os.path.join(a_ , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(a_ , a_ )
SCREAMING_SNAKE_CASE : Optional[int] = MLukeTokenizer.from_pretrained(a_ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
SCREAMING_SNAKE_CASE : Any = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict['''embeddings.word_embeddings.weight''']
SCREAMING_SNAKE_CASE : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE : Optional[int] = state_dict[bias_name]
SCREAMING_SNAKE_CASE : Dict = decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE : List[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE : Union[str, Any] = F"encoder.layer.{layer_index}.attention.self."
SCREAMING_SNAKE_CASE : str = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE : List[Any] = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE : int = state_dict['''entity_embeddings.entity_embeddings.weight''']
SCREAMING_SNAKE_CASE : Optional[Any] = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE : str = state_dict['''entity_predictions.bias''']
SCREAMING_SNAKE_CASE : int = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE : str = LukeForMaskedLM(config=a_ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
SCREAMING_SNAKE_CASE : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
SCREAMING_SNAKE_CASE : Optional[int] = state_dict[key]
else:
SCREAMING_SNAKE_CASE : str = state_dict[key]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = model.load_state_dict(a_ , strict=a_ )
if set(a_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"Unexpected unexpected_keys: {unexpected_keys}" )
if set(a_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE : Any = MLukeTokenizer.from_pretrained(a_ , task='''entity_classification''' )
SCREAMING_SNAKE_CASE : List[Any] = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
SCREAMING_SNAKE_CASE : int = (0, 9)
SCREAMING_SNAKE_CASE : int = tokenizer(a_ , entity_spans=[span] , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Any = model(**a_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE : Dict = torch.Size((1, 33, 7_68) )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size((1, 1, 7_68) )
SCREAMING_SNAKE_CASE : Any = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
F" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , a_ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE : Any = MLukeTokenizer.from_pretrained(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = '''Tokyo is the capital of <mask>.'''
SCREAMING_SNAKE_CASE : List[Any] = (24, 30)
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(a_ , entity_spans=[span] , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : int = model(**a_ )
SCREAMING_SNAKE_CASE : Dict = encoding['''input_ids'''][0].tolist()
SCREAMING_SNAKE_CASE : str = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(a_ )
SCREAMING_SNAKE_CASE : Any = outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE : int = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(a_ ) )
model.save_pretrained(a_ )
def __A ( a_ : Tuple )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
    SCREAMING_SNAKE_CASE : List[Any] = [json.loads(line ) for line in open(a_ )]
SCREAMING_SNAKE_CASE : str = {}
for entry in data:
SCREAMING_SNAKE_CASE : Optional[int] = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE : int = entity_id
break
SCREAMING_SNAKE_CASE : List[str] = F"{language}:{entity_name}"
SCREAMING_SNAKE_CASE : Dict = entity_id
return new_mapping
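# For reference, each line of the original entity vocab file is expected to be a
# JSON object roughly of the following form (illustrative; the exact schema comes
# from the upstream LUKE release):
#
#     {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}
#
# so the loop above maps "en:Japan" and "ja:日本" to entity id 3, while special
# tokens such as "[MASK]" keep their bare names.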
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCamelCase__ : List[Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 18 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = nn.ModuleList(lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Union[torch.Tensor, float, int] , lowerCamelCase_ :torch.Tensor , lowerCamelCase_ :List[torch.tensor] , lowerCamelCase_ :List[float] , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[Dict[str, Any]] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = True , ) -> Union[ControlNetOutput, Tuple]:
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ , self.nets ) ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = controlnet(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase_ , lowerCamelCase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Union[str, os.PathLike] , lowerCamelCase_ :bool = True , lowerCamelCase_ :Callable = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[str] = None , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Any = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase_ , is_main_process=lowerCamelCase_ , save_function=lowerCamelCase_ , safe_serialization=lowerCamelCase_ , variant=lowerCamelCase_ , )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = model_path_to_save + f"_{idx}"
@classmethod
def __lowerCAmelCase ( cls :Dict , lowerCamelCase_ :Optional[Union[str, os.PathLike]] , **lowerCamelCase_ :Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Dict = pretrained_model_path
while os.path.isdir(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = ControlNetModel.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
controlnets.append(lowerCamelCase_ )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + f"_{idx}"
logger.info(f"{len(lowerCamelCase_ )} controlnets loaded from {pretrained_model_path}." )
if len(lowerCamelCase_ ) == 0:
raise ValueError(
f"No ControlNets found under {os.path.dirname(lowerCamelCase_ )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(lowerCamelCase_ )
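# A minimal usage sketch (checkpoint paths are placeholders; class names follow
# the upstream diffusers API for this multi-ControlNet wrapper):
#
#     pose = ControlNetModel.from_pretrained("path/to/controlnet_pose")
#     canny = ControlNetModel.from_pretrained("path/to/controlnet_canny")
#     multi = MultiControlNetModel([pose, canny])
#     multi.save_pretrained("./mydirectory/controlnet")  # writes controlnet, controlnet_1, ...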
| 18 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Tuple = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __A ( a_ : float , a_ : float , a_ : bool = False )-> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(a_ ), magnitude * sin(a_ )]
return [magnitude * cos(radians(a_ ) ), magnitude * sin(radians(a_ ) )]
def __A ( a_ : NDArray[floataa] , a_ : NDArray[floataa] , a_ : float = 10**-1 )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : NDArray[floataa] = cross(a_ , a_ )
SCREAMING_SNAKE_CASE : float = sum(a_ )
return abs(a_ ) < eps
if __name__ == "__main__":
# Test to check if it works
lowerCamelCase__ : Optional[Any] = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCamelCase__ : Union[str, Any] = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
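    # Hand check of the last system: every location lies on the x-axis and every
    # force is vertical, so each moment about the origin is x * F_y and the net
    # moment is 0*(-2000) + 6*(-1200) + 10*15600 + 12*(-12400)
    # = -7200 + 156000 - 148800 = 0; the vertical forces likewise sum to
    # -2000 - 1200 + 15600 - 12400 = 0, confirming static equilibrium.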
| 18 | 1 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :Optional[int] , **lowerCamelCase_ :str ) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
if self.framework != "pt":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
# No specific FOR_XXX available yet
def __call__( self :str , lowerCamelCase_ :Union[np.ndarray, bytes, str] , **lowerCamelCase_ :List[str] ) -> Dict:
'''simple docstring'''
return super().__call__(lowerCamelCase_ , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Dict , **lowerCamelCase_ :Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = {}
if "candidate_labels" in kwargs:
SCREAMING_SNAKE_CASE : Any = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
SCREAMING_SNAKE_CASE : Any = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Dict , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :Any="This is a sound of {}." ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
SCREAMING_SNAKE_CASE : Optional[Any] = requests.get(lowerCamelCase_ ).content
else:
with open(lowerCamelCase_ , '''rb''' ) as f:
SCREAMING_SNAKE_CASE : Any = f.read()
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = ffmpeg_read(lowerCamelCase_ , self.feature_extractor.sampling_rate )
if not isinstance(lowerCamelCase_ , np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
SCREAMING_SNAKE_CASE : Any = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = candidate_labels
SCREAMING_SNAKE_CASE : Any = [hypothesis_template.format(lowerCamelCase_ ) for x in candidate_labels]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework , padding=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = [text_inputs]
return inputs
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = model_inputs.pop('''candidate_labels''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = text_inputs[0]
else:
# Batching case.
SCREAMING_SNAKE_CASE : List[str] = text_inputs[0][0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model(**lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = model_outputs.pop('''candidate_labels''' )
SCREAMING_SNAKE_CASE : Optional[int] = model_outputs['''logits'''][0]
if self.framework == "pt":
SCREAMING_SNAKE_CASE : Dict = logits.softmax(dim=0 )
SCREAMING_SNAKE_CASE : Dict = probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
SCREAMING_SNAKE_CASE : Dict = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(lowerCamelCase_ , lowerCamelCase_ ) , key=lambda lowerCamelCase_ : -x[0] )
]
return result
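# A minimal usage sketch (model name and labels are illustrative; assumes a
# CLAP-style checkpoint supported by this pipeline):
#
#     from transformers import pipeline
#     classifier = pipeline(
#         task="zero-shot-audio-classification", model="laion/clap-htsat-unfused"
#     )
#     classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
#     # -> [{"score": ..., "label": "Sound of a dog"}, ...]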
| 18 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ : Optional[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __A ( a_ : str , a_ : str )-> tuple[str, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = len([g for position, g in enumerate(a_ ) if g == main_target[position]] )
return (item, float(a_ ))
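# A worked example of the fitness above, kept self-contained (intended semantics:
# the score counts the positions where the candidate matches the target). With
# target "banana", the candidate "banono" matches at positions 0, 1, 2 and 4:
assert len([g for position, g in enumerate("banono") if g == "banana"[position]]) == 4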
def __A ( a_ : str , a_ : str )-> tuple[str, str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = random.randint(0 , len(a_ ) - 1 )
SCREAMING_SNAKE_CASE : str = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __A ( a_ : str , a_ : list[str] )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(a_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE : Any = random.choice(a_ )
return "".join(a_ )
def __A ( a_ : tuple[str, float] , a_ : list[tuple[str, float]] , a_ : list[str] , )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE : List[str] = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10 if child_n >= 10 else child_n
for _ in range(a_ ):
SCREAMING_SNAKE_CASE : List[str] = population_score[random.randint(0 , a_ )][0]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = crossover(parent_a[0] , a_ )
# Append new string to the population list.
pop.append(mutate(a_ , a_ ) )
pop.append(mutate(a_ , a_ ) )
return pop
def __A ( a_ : str , a_ : list[str] , a_ : bool = True )-> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE : List[Any] = F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(a_ )
# Verify that the target contains no genes besides the ones inside genes variable.
SCREAMING_SNAKE_CASE : List[str] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE : str = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(a_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE : Tuple = []
for _ in range(a_ ):
population.append(''''''.join([random.choice(a_ ) for i in range(len(a_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(a_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE : int = [evaluate(a_ , a_ ) for item in population]
# Check if there is a matching evolution.
SCREAMING_SNAKE_CASE : List[Any] = sorted(a_ , key=lambda a_ : x[1] , reverse=a_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
SCREAMING_SNAKE_CASE : Optional[Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(a_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE : Optional[int] = [
(item, score / len(a_ )) for item, score in population_score
]
# This is selection
for i in range(a_ ):
population.extend(select(population_score[int(a_ )] , a_ , a_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
        # forever to compute large strings, but will also converge on small strings
        # in far fewer generations.
if len(a_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ : Dict = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ : int = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 18 | 1 |
"""simple docstring"""
def __A ( a_ : int , a_ : int )-> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = str(bin(a_ ) )[2:] # remove the leading "0b"
SCREAMING_SNAKE_CASE : Optional[Any] = str(bin(a_ ) )[2:]
SCREAMING_SNAKE_CASE : int = max(len(a_ ) , len(a_ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(a_ ) , b_binary.zfill(a_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
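    # Worked example: 25 = 0b11001 and 32 = 0b100000; zero-filled to the same
    # width they OR bitwise to 0b111001, i.e. 57. A self-contained check:
    assert bin(25 | 32) == "0b111001"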
| 18 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] )-> Dict:
'''simple docstring'''
return max(metric_fn(a_ , a_ ) for gt in ground_truths )
def __A ( a_ : List[Any] , a_ : Union[str, Any] , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Optional[Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(a_ , sep='''\t''' , header=a_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : str = ast.literal_eval(a_ )
answers.append(a_ )
else:
SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(a_ , a_ ):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_ )
fa += metric_max_over_ground_truths(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = 100.0 * em / total
SCREAMING_SNAKE_CASE : Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __A ( a_ : Any , a_ : Any , a_ : List[Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = args.k
SCREAMING_SNAKE_CASE : Tuple = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = 0
for hypo, reference in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __A ( a_ : Any , a_ : List[str] , a_ : str )-> int:
'''simple docstring'''
def strip_title(a_ : Optional[Any] ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE : Tuple = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE : Any = rag_model.rag.question_encoder(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever(
a_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Dict = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : List[Any] = [strip_title(a_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_ ) )
return provenance_strings
def __A ( a_ : List[Any] , a_ : int , a_ : str )-> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : Dict = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Any = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Tuple = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
if args.print_predictions:
for q, a in zip(a_ , a_ ):
logger.info('''Q: {} - A: {}'''.format(a_ , a_ ) )
return answers
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
            '''RAG model type: rag_sequence, rag_token or bart; if none specified, the type is inferred from the'''
            ''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file. '''
            '''qa - a single line in the following format: question [tab] answer_list. '''
            '''ans - a single line of the gold file contains the expected answer string.'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_ )
SCREAMING_SNAKE_CASE : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(a_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(a_ , **a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a_ , retriever=a_ , **a_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(a_ , **a_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE : Dict = []
for line in tqdm(a_ ):
questions.append(line.strip() )
if len(a_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) )
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = get_args()
main(args)
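    # Example invocation (paths are placeholders; assumes this script is saved as
    # eval_rag.py, matching the upstream RAG research example):
    #
    #   python eval_rag.py \
    #       --model_name_or_path facebook/rag-sequence-nq \
    #       --evaluation_set path/to/questions.txt \
    #       --gold_data_path path/to/gold.tsv \
    #       --predictions_path preds.txt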
| 18 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """blenderbot-small"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :Any , lowerCamelCase_ :Dict=5_02_65 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :Tuple=8 , lowerCamelCase_ :int=20_48 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=8 , lowerCamelCase_ :str=20_48 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :Tuple=0.0 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Dict=False , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=2 , **lowerCamelCase_ :Dict , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = d_model
SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Tuple = encoder_layers
SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Any = activation_dropout
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , forced_eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch'''}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __lowerCAmelCase ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Any = super().outputs
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : str = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __lowerCAmelCase ( self :int , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE : Optional[int] = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE : str = dict(**lowerCamelCase_ , **lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = common_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE : str = common_inputs['''decoder_input_ids'''].shape[1]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.num_attention_heads
SCREAMING_SNAKE_CASE : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE : List[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE : int = min(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = max(lowerCamelCase_ , lowerCamelCase_ ) - min_num_layers
SCREAMING_SNAKE_CASE : Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowerCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE : int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowerCamelCase_ , lowerCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) )
return common_inputs
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : List[str] = seqlen + 2
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.num_layers
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Tuple = common_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE : Any = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ , dtype=lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(lowerCamelCase_ )
]
return common_inputs
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : int = tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE : Any = dict(tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ ) )
return common_inputs
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
return common_inputs
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Optional[Any] = super()._flatten_past_key_values_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self )._flatten_past_key_values_(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
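# For reference, each past_key_values tensor built by the dummy-input helpers
# above is 4-D with shape
# (batch, num_attention_heads, past_sequence_length, hidden_size // num_attention_heads),
# matching the dynamic axes declared in the `inputs`/`outputs` properties.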
| 18 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {"vocab_file": "vocab.json"}
lowerCamelCase__ : Dict = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCamelCase__ : Optional[Any] = {"mgp-str": 27}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :int , vocab_file :str , unk_token :str="[GO]" , bos_token :str="[GO]" , eos_token :str="[s]" , pad_token :str="[GO]" , **kwargs :List[str] ) -> Tuple:
    '''simple docstring'''
    super().__init__(
        unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
    with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
        self.vocab = json.load(vocab_handle )
    self.decoder = {v: k for k, v in self.vocab.items()}
@property
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
return len(self.vocab )
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __lowerCAmelCase ( self :List[str] , text :str ) -> List[str]:
    '''simple docstring'''
    char_tokens = []
    for s in text:
        char_tokens.extend(s )
    return char_tokens
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , save_directory :str , filename_prefix :Optional[str] = None ) -> Tuple[str]:
    '''simple docstring'''
    if not os.path.isdir(save_directory ):
        logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory ) )
        return
    vocab_file = os.path.join(
        save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
    with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
    return (vocab_file,)
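A self-contained toy version of the character-level scheme this tokenizer implements. The vocabulary below is made up for the example; the real `vocab.json` ships with the checkpoint.

```python
vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}   # illustrative vocab
decoder = {i: ch for ch, i in vocab.items()}

def tokenize(text: str) -> list:
    # one token per character, exactly like _tokenize above
    chars = []
    for s in text:
        chars.extend(s)
    return chars

ids = [vocab.get(t, vocab["[GO]"]) for t in tokenize("abc")]
assert ids == [2, 3, 4]
assert "".join(decoder[i] for i in ids) == "abc"   # round-trips cleanly
```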
| 18 | 1 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """encodec"""
def __init__( self :List[str] , lowerCamelCase_ :Tuple=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , lowerCamelCase_ :str=2_40_00 , lowerCamelCase_ :Any=1 , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :Optional[int]=None , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :str=1_28 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :int=1 , lowerCamelCase_ :Dict=[8, 5, 4, 2] , lowerCamelCase_ :List[Any]="weight_norm" , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=7 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Optional[int]="reflect" , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Dict=1.0 , lowerCamelCase_ :Any=10_24 , lowerCamelCase_ :str=None , lowerCamelCase_ :Union[str, Any]=True , **lowerCamelCase_ :Optional[int] , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = target_bandwidths
SCREAMING_SNAKE_CASE : List[str] = sampling_rate
SCREAMING_SNAKE_CASE : Tuple = audio_channels
SCREAMING_SNAKE_CASE : Tuple = normalize
SCREAMING_SNAKE_CASE : str = chunk_length_s
SCREAMING_SNAKE_CASE : List[str] = overlap
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_filters
SCREAMING_SNAKE_CASE : Tuple = num_residual_layers
SCREAMING_SNAKE_CASE : List[Any] = upsampling_ratios
SCREAMING_SNAKE_CASE : Optional[int] = norm_type
SCREAMING_SNAKE_CASE : Any = kernel_size
SCREAMING_SNAKE_CASE : Union[str, Any] = last_kernel_size
SCREAMING_SNAKE_CASE : Tuple = residual_kernel_size
SCREAMING_SNAKE_CASE : Any = dilation_growth_rate
SCREAMING_SNAKE_CASE : Optional[int] = use_causal_conv
SCREAMING_SNAKE_CASE : str = pad_mode
SCREAMING_SNAKE_CASE : List[Any] = compress
SCREAMING_SNAKE_CASE : Optional[Any] = num_lstm_layers
SCREAMING_SNAKE_CASE : Dict = trim_right_ratio
SCREAMING_SNAKE_CASE : List[Any] = codebook_size
SCREAMING_SNAKE_CASE : Union[str, Any] = codebook_dim if codebook_dim is not None else hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
    f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`, got {self.norm_type}" )
super().__init__(**lowerCamelCase_ )
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __lowerCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
hop_length = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
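The derived quantities exposed by the properties above can be recomputed standalone from the 24 kHz defaults in this file; the values below follow directly from those defaults.

```python
import math
import numpy as np

sampling_rate = 24_000                          # 24 kHz default above
upsampling_ratios = [8, 5, 4, 2]
target_bandwidths = [1.5, 3.0, 6.0, 12.0, 24.0]

hop_length = int(np.prod(upsampling_ratios))            # 320 samples per frame
frame_rate = math.ceil(sampling_rate / hop_length)      # 75 frames per second
num_quantizers = int(1000 * target_bandwidths[-1] // (frame_rate * 10))  # 32

print(hop_length, frame_rate, num_quantizers)  # 320 75 32
```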
| 18 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :Optional[int] , lowerCamelCase_ :Union[str, Any]=5_02_67 , lowerCamelCase_ :int=50_00_00 , lowerCamelCase_ :Tuple=7_68 , lowerCamelCase_ :List[str]=2_56 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :Optional[Any]=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :List[str]=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Tuple=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Any=0 , lowerCamelCase_ :str=2 , **lowerCamelCase_ :List[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[str] = entity_vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = entity_emb_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
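A quick check of the defaults defined above; this assumes transformers is installed and that `LukeConfig` is the public upstream name of this class.

```python
from transformers import LukeConfig

config = LukeConfig()
print(config.vocab_size, config.entity_vocab_size, config.entity_emb_size)
# expected from the defaults above: 50267 500000 256
```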
| 18 | 1 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
# TODO Update this
lowerCamelCase__ : str = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """esm"""
def __init__( self :int , lowerCamelCase_ :int=None , lowerCamelCase_ :int=None , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :Dict=7_68 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Dict=30_72 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Dict=10_26 , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :Optional[Any]=1E-12 , lowerCamelCase_ :Dict="absolute" , lowerCamelCase_ :str=True , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :Dict=None , lowerCamelCase_ :Optional[int]=None , **lowerCamelCase_ :Dict , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , mask_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : str = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : int = position_embedding_type
SCREAMING_SNAKE_CASE : Tuple = use_cache
SCREAMING_SNAKE_CASE : List[str] = emb_layer_norm_before
SCREAMING_SNAKE_CASE : Any = token_dropout
SCREAMING_SNAKE_CASE : List[str] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
SCREAMING_SNAKE_CASE : str = EsmFoldConfig()
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = EsmFoldConfig(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
SCREAMING_SNAKE_CASE : List[str] = get_default_vocab_list()
else:
SCREAMING_SNAKE_CASE : int = vocab_list
else:
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , lowerCamelCase_ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __lowerCAmelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = super().to_dict()
if isinstance(self.esmfold_config , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[str] = self.esmfold_config.to_dict()
return output
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = None
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = 0
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = 1_28
UpperCamelCase = None
def __lowerCAmelCase ( self :str ) -> Tuple:
'''simple docstring'''
if self.trunk is None:
SCREAMING_SNAKE_CASE : str = TrunkConfig()
elif isinstance(self.trunk , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = TrunkConfig(**self.trunk )
def __lowerCAmelCase ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = asdict(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.trunk.to_dict()
return output
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = 48
UpperCamelCase = 10_24
UpperCamelCase = 1_28
UpperCamelCase = 32
UpperCamelCase = 32
UpperCamelCase = 32
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = False
UpperCamelCase = 4
UpperCamelCase = 1_28
UpperCamelCase = None
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
if self.structure_module is None:
SCREAMING_SNAKE_CASE : Dict = StructureModuleConfig()
elif isinstance(self.structure_module , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_head_width != 0:
    raise ValueError(
        '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
        f" {self.sequence_state_dim} and {self.sequence_head_width}." )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
    raise ValueError(
        '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
        f" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
SCREAMING_SNAKE_CASE : Tuple = self.sequence_state_dim // self.sequence_head_width
SCREAMING_SNAKE_CASE : Any = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}." )
def __lowerCAmelCase ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = asdict(self )
SCREAMING_SNAKE_CASE : Tuple = self.structure_module.to_dict()
return output
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = 3_84
UpperCamelCase = 1_28
UpperCamelCase = 16
UpperCamelCase = 1_28
UpperCamelCase = 12
UpperCamelCase = 4
UpperCamelCase = 8
UpperCamelCase = 0.1
UpperCamelCase = 8
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = 7
UpperCamelCase = 10
UpperCamelCase = 1E-8
UpperCamelCase = 1E5
def __lowerCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
return asdict(self )
def __A ( )-> str:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
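The head-width consistency rule `TrunkConfig` enforces, reduced to a standalone sketch using the defaults above (1024/32 for the sequence track, 128/32 for the pairwise track).

```python
def check_heads(state_dim: int, head_width: int, name: str) -> int:
    # the state dimension must split evenly into attention heads
    num_heads = state_dim // head_width
    if state_dim != num_heads * head_width:
        raise ValueError(f"`{name}` must be a multiple of its head width, got {state_dim} and {head_width}")
    return num_heads

assert check_heads(1024, 32, "sequence_state_dim") == 32
assert check_heads(128, 32, "pairwise_state_dim") == 4
```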
| 18 |
"""simple docstring"""
def search( list_data : list , key : int , left : int = 0 , right : int = 0 )-> int:
    '''simple docstring'''
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
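Example runs of the two-ended recursive search above (toy data, made up for illustration).

```python
data = [1, 3, 5, 7, 9, 11]
assert search(data, 1) == 0    # hit at the left end immediately
assert search(data, 11) == 5   # hit at the right end immediately
assert search(data, 7) == 3    # both ends narrow inward until they meet
assert search(data, 4) == -1   # absent key
```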
| 18 | 1 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset( )-> Dataset:
    '''simple docstring'''
    data_dict = {
        '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
        '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
        '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
    }
    dataset = Dataset.from_dict(data_dict )
    return dataset
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
ds = get_dataset()
duplicate_clusters = make_duplicate_clusters(ds , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def __lowerCAmelCase ( self :List[str] ) -> str:
'''simple docstring'''
ds = get_dataset()
ds_filter, duplicate_clusters = deduplicate_dataset(ds )
self.assertEqual(len(ds_filter ) , 2 )
print(duplicate_clusters )
self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , True )
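Why the first two toy rows cluster while the third does not: their token-set Jaccard similarity clears the 0.85 threshold. This is a rough sketch of the criterion MinHash approximates, not the `minhash_deduplication` implementation.

```python
def jaccard(a: str, b: str) -> float:
    # similarity of whitespace-token sets (real MinHash works on shingles)
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb)

print(jaccard("a " * 20, "a " * 30))  # 1.0 -> clustered as duplicates
print(jaccard("a " * 20, "b " * 7))   # 0.0 -> kept apart
```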
| 18 |
"""simple docstring"""
def prime_sieve_eratosthenes( num : int )-> list[int]:
    '''simple docstring'''
    if num <= 0:
        raise ValueError('''Input must be a positive integer''' )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
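A quick check of the sieve above.

```python
print(prime_sieve_eratosthenes(30))
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
```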
| 18 | 1 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __A ( a_ : Tuple , a_ : str=0.999 , a_ : List[Any]="cosine" , )-> Optional[Any]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(a_ : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(a_ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" )
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i in range(a_ ):
SCREAMING_SNAKE_CASE : Any = i / num_diffusion_timesteps
SCREAMING_SNAKE_CASE : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(a_ ) / alpha_bar_fn(a_ ) , a_ ) )
return torch.tensor(a_ , dtype=torch.floataa )
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = [e.name for e in KarrasDiffusionSchedulers]
UpperCamelCase = 2
@register_to_config
def __init__( self :Dict , lowerCamelCase_ :int = 10_00 , lowerCamelCase_ :float = 0.0_0_0_8_5 , lowerCamelCase_ :float = 0.0_1_2 , lowerCamelCase_ :str = "linear" , lowerCamelCase_ :Optional[Union[np.ndarray, List[float]]] = None , lowerCamelCase_ :str = "epsilon" , lowerCamelCase_ :Optional[bool] = False , lowerCamelCase_ :Optional[bool] = False , lowerCamelCase_ :float = 1.0 , lowerCamelCase_ :str = "linspace" , lowerCamelCase_ :int = 0 , ) -> Tuple:
'''simple docstring'''
if trained_betas is not None:
SCREAMING_SNAKE_CASE : str = torch.tensor(lowerCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "linear":
SCREAMING_SNAKE_CASE : List[str] = torch.linspace(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE : List[str] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE : int = betas_for_alpha_bar(lowerCamelCase_ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
SCREAMING_SNAKE_CASE : List[str] = betas_for_alpha_bar(lowerCamelCase_ , alpha_transform_type='''exp''' )
else:
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" )
SCREAMING_SNAKE_CASE : int = 1.0 - self.betas
SCREAMING_SNAKE_CASE : List[Any] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = use_karras_sigmas
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str]=None ) -> Dict:
'''simple docstring'''
if schedule_timesteps is None:
SCREAMING_SNAKE_CASE : Optional[int] = self.timesteps
SCREAMING_SNAKE_CASE : str = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
SCREAMING_SNAKE_CASE : Dict = 1 if len(lowerCamelCase_ ) > 1 else 0
else:
SCREAMING_SNAKE_CASE : Dict = timestep.cpu().item() if torch.is_tensor(lowerCamelCase_ ) else timestep
SCREAMING_SNAKE_CASE : Tuple = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowerCAmelCase ( self :Dict ) -> str:
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowerCAmelCase ( self :int , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.index_for_timestep(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.sigmas[step_index]
SCREAMING_SNAKE_CASE : List[str] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, torch.device] = None , lowerCamelCase_ :Optional[int] = None , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = num_inference_steps
SCREAMING_SNAKE_CASE : Optional[int] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
SCREAMING_SNAKE_CASE : Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , lowerCamelCase_ , dtype=lowerCamelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
SCREAMING_SNAKE_CASE : Tuple = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE : List[str] = (np.arange(0 , lowerCamelCase_ ) * step_ratio).round()[::-1].copy().astype(lowerCamelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
SCREAMING_SNAKE_CASE : Optional[Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE : List[str] = (np.arange(lowerCamelCase_ , 0 , -step_ratio )).round().copy().astype(lowerCamelCase_ )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
SCREAMING_SNAKE_CASE : Tuple = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
SCREAMING_SNAKE_CASE : int = np.log(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = np.interp(lowerCamelCase_ , np.arange(0 , len(lowerCamelCase_ ) ) , lowerCamelCase_ )
if self.config.use_karras_sigmas:
SCREAMING_SNAKE_CASE : int = self._convert_to_karras(in_sigmas=lowerCamelCase_ , num_inference_steps=self.num_inference_steps )
SCREAMING_SNAKE_CASE : Optional[int] = np.array([self._sigma_to_t(lowerCamelCase_ , lowerCamelCase_ ) for sigma in sigmas] )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCamelCase_ ).to(device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(lowerCamelCase_ ).startswith('''mps''' ):
# mps does not support float64
SCREAMING_SNAKE_CASE : Tuple = timesteps.to(lowerCamelCase_ , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE : List[Any] = timesteps.to(device=lowerCamelCase_ )
# empty dt and derivative
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
SCREAMING_SNAKE_CASE : int = defaultdict(lowerCamelCase_ )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = np.log(lowerCamelCase_ )
# get distribution
SCREAMING_SNAKE_CASE : Tuple = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
SCREAMING_SNAKE_CASE : str = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
SCREAMING_SNAKE_CASE : Dict = low_idx + 1
SCREAMING_SNAKE_CASE : List[str] = log_sigmas[low_idx]
SCREAMING_SNAKE_CASE : Optional[int] = log_sigmas[high_idx]
# interpolate sigmas
SCREAMING_SNAKE_CASE : Tuple = (low - log_sigma) / (low - high)
SCREAMING_SNAKE_CASE : Optional[Any] = np.clip(lowerCamelCase_ , 0 , 1 )
# transform interpolation to time range
SCREAMING_SNAKE_CASE : Dict = (1 - w) * low_idx + w * high_idx
SCREAMING_SNAKE_CASE : Dict = t.reshape(sigma.shape )
return t
def __lowerCAmelCase ( self :Optional[int] , in_sigmas :torch.FloatTensor , num_inference_steps :int ) -> torch.FloatTensor:
    '''simple docstring'''
    sigma_min : float = in_sigmas[-1].item()
    sigma_max : float = in_sigmas[0].item()
    rho = 7.0 # 7.0 is the value used in the paper
    ramp = np.linspace(0 , 1 , num_inference_steps )
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
    return sigmas
@property
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
return self.dt is None
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Union[torch.FloatTensor, np.ndarray] , lowerCamelCase_ :Union[float, torch.FloatTensor] , lowerCamelCase_ :Union[torch.FloatTensor, np.ndarray] , lowerCamelCase_ :bool = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.index_for_timestep(lowerCamelCase_ )
# advance index counter by 1
SCREAMING_SNAKE_CASE : Any = timestep.cpu().item() if torch.is_tensor(lowerCamelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
SCREAMING_SNAKE_CASE : str = self.sigmas[step_index]
SCREAMING_SNAKE_CASE : str = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
SCREAMING_SNAKE_CASE : Optional[Any] = self.sigmas[step_index - 1]
SCREAMING_SNAKE_CASE : List[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : List[str] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE : Optional[int] = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE : Tuple = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE : Dict = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE : Tuple = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE : Tuple = model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.config.clip_sample:
SCREAMING_SNAKE_CASE : int = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
SCREAMING_SNAKE_CASE : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
SCREAMING_SNAKE_CASE : Union[str, Any] = sigma_next - sigma_hat
# store for 2nd order step
SCREAMING_SNAKE_CASE : Any = derivative
SCREAMING_SNAKE_CASE : List[str] = dt
SCREAMING_SNAKE_CASE : Optional[int] = sample
else:
# 2. 2nd order / Heun's method
SCREAMING_SNAKE_CASE : Optional[Any] = (sample - pred_original_sample) / sigma_next
SCREAMING_SNAKE_CASE : List[str] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
SCREAMING_SNAKE_CASE : List[str] = self.dt
SCREAMING_SNAKE_CASE : List[str] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : int = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :torch.FloatTensor , ) -> torch.FloatTensor:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase_ ):
# mps does not support float64
SCREAMING_SNAKE_CASE : Tuple = self.timesteps.to(original_samples.device , dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[str] = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = self.timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE : Dict = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE : Tuple = [self.index_for_timestep(lowerCamelCase_ , lowerCamelCase_ ) for t in timesteps]
SCREAMING_SNAKE_CASE : str = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
SCREAMING_SNAKE_CASE : List[Any] = sigma.unsqueeze(-1 )
SCREAMING_SNAKE_CASE : Tuple = original_samples + noise * sigma
return noisy_samples
def __len__( self :int ) -> int:
'''simple docstring'''
return self.config.num_train_timesteps
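The scheduler above alternates a first-order (Euler) step with a second-order correction that averages the two slopes. Below is a standalone sketch of that update on a generic ODE dx/dσ = d(x, σ); it is not the diffusers API, and the Euler fallback at σ = 0 is an assumption for the final step.

```python
def heun_step(x, sigma, sigma_next, d):
    d1 = d(x, sigma)                         # first order: slope at sigma
    x_euler = x + d1 * (sigma_next - sigma)  # Euler prediction
    if sigma_next == 0:                      # assumed fallback for the last step
        return x_euler
    d2 = d(x_euler, sigma_next)              # slope at the prediction
    return x + 0.5 * (d1 + d2) * (sigma_next - sigma)

# e.g. one step of dx/dsigma = -x from sigma=1.0 to sigma=0.5
print(heun_step(1.0, 1.0, 0.5, lambda x, s: -x))  # ~1.625 vs exact e**0.5 ~ 1.6487
```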
| 18 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
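A condensed sketch of the lazy-import machinery wired up above; the real `transformers._LazyModule` additionally handles extra objects and friendlier error messages, so treat this as illustrative only.

```python
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # import the submodule only on first attribute access
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value
```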
| 18 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCamelCase__ : Any = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """albert"""
def __init__( self :List[str] , lowerCamelCase_ :Any=3_00_00 , lowerCamelCase_ :List[str]=1_28 , lowerCamelCase_ :Optional[Any]=40_96 , lowerCamelCase_ :int=12 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :str=64 , lowerCamelCase_ :List[Any]=1_63_84 , lowerCamelCase_ :List[str]=1 , lowerCamelCase_ :Optional[Any]="gelu_new" , lowerCamelCase_ :Any=0 , lowerCamelCase_ :Union[str, Any]=0 , lowerCamelCase_ :Optional[int]=5_12 , lowerCamelCase_ :str=2 , lowerCamelCase_ :str=0.0_2 , lowerCamelCase_ :Dict=1E-12 , lowerCamelCase_ :int=0.1 , lowerCamelCase_ :int="absolute" , lowerCamelCase_ :str=0 , lowerCamelCase_ :Optional[Any]=2 , lowerCamelCase_ :str=3 , **lowerCamelCase_ :int , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : str = embedding_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_hidden_groups
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = inner_group_num
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = classifier_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = position_embedding_type
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
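Spelled out, the `inputs` property above resolves to one of two axis mappings; this tiny helper reproduces the logic standalone.

```python
from collections import OrderedDict

def albert_onnx_inputs(task: str):
    # mirrors the property above: multiple-choice adds a `choice` axis
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [
            ("input_ids", dynamic_axis),
            ("attention_mask", dynamic_axis),
            ("token_type_ids", dynamic_axis),
        ]
    )

print(albert_onnx_inputs("default"))
```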
| 18 |
"""simple docstring"""
import os
import sys
lowerCamelCase__ : List[Any] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def config( *args , **kwargs ):
    '''simple docstring'''
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer( *args , **kwargs ):
    '''simple docstring'''
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model( *args , **kwargs ):
    '''simple docstring'''
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
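These entry points are typically consumed through torch.hub. The snippet below is illustrative only; the repo path "huggingface/pytorch-transformers" is the historically documented one and is assumed here, and it requires network access.

```python
# import torch
# tok = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")
# mdl = torch.hub.load("huggingface/pytorch-transformers", "modelForMaskedLM", "bert-base-uncased")
```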
| 18 | 1 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
UpperCamelCase = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
UpperCamelCase = field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __lowerCAmelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.task_name.lower()
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """train"""
UpperCamelCase = """dev"""
UpperCamelCase = """test"""
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self :List[str] , lowerCamelCase_ :GlueDataTrainingArguments , lowerCamelCase_ :PreTrainedTokenizerBase , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Union[str, Split] = Split.train , lowerCamelCase_ :Optional[str] = None , ) -> int:
'''simple docstring'''
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = args
SCREAMING_SNAKE_CASE : Any = glue_processors[args.task_name]()
SCREAMING_SNAKE_CASE : int = glue_output_modes[args.task_name]
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
try:
SCREAMING_SNAKE_CASE : List[str] = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}" , )
SCREAMING_SNAKE_CASE : Optional[Any] = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE : List[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE : Dict = cached_features_file + '''.lock'''
with FileLock(lowerCamelCase_ ):
if os.path.exists(lowerCamelCase_ ) and not args.overwrite_cache:
SCREAMING_SNAKE_CASE : Dict = time.time()
SCREAMING_SNAKE_CASE : List[Any] = torch.load(lowerCamelCase_ )
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
else:
logger.info(f"Creating features from dataset file at {args.data_dir}" )
if mode == Split.dev:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
SCREAMING_SNAKE_CASE : List[str] = self.processor.get_test_examples(args.data_dir )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
SCREAMING_SNAKE_CASE : Tuple = examples[:limit_length]
SCREAMING_SNAKE_CASE : Optional[int] = glue_convert_examples_to_features(
lowerCamelCase_ , lowerCamelCase_ , max_length=args.max_seq_length , label_list=lowerCamelCase_ , output_mode=self.output_mode , )
SCREAMING_SNAKE_CASE : List[Any] = time.time()
torch.save(self.features , lowerCamelCase_ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self :List[Any] ) -> Any:
'''simple docstring'''
return len(self.features )
def __getitem__( self :List[Any] , lowerCamelCase_ :Union[str, Any] ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
return self.label_list
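The cache-or-build pattern in `__init__` above, reduced to a standalone sketch; the path and payload are illustrative, not the GlueDataset ones.

```python
import os
import torch
from filelock import FileLock

def load_or_build(cache_path: str, build):
    # only one process builds the features; the rest wait on the lock
    # and then read the cached file
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path):
            return torch.load(cache_path)
        features = build()
        torch.save(features, cache_path)
        return features

features = load_or_build("/tmp/demo_features.pt", lambda: list(range(5)))
print(features)  # [0, 1, 2, 3, 4] on first and subsequent runs
```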
| 18 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """encodec"""
def __init__( self :List[str] , lowerCamelCase_ :Tuple=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , lowerCamelCase_ :str=2_40_00 , lowerCamelCase_ :Any=1 , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :Optional[int]=None , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :str=1_28 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :int=1 , lowerCamelCase_ :Dict=[8, 5, 4, 2] , lowerCamelCase_ :List[Any]="weight_norm" , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=7 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Optional[int]="reflect" , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Dict=1.0 , lowerCamelCase_ :Any=10_24 , lowerCamelCase_ :str=None , lowerCamelCase_ :Union[str, Any]=True , **lowerCamelCase_ :Optional[int] , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = target_bandwidths
SCREAMING_SNAKE_CASE : List[str] = sampling_rate
SCREAMING_SNAKE_CASE : Tuple = audio_channels
SCREAMING_SNAKE_CASE : Tuple = normalize
SCREAMING_SNAKE_CASE : str = chunk_length_s
SCREAMING_SNAKE_CASE : List[str] = overlap
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_filters
SCREAMING_SNAKE_CASE : Tuple = num_residual_layers
SCREAMING_SNAKE_CASE : List[Any] = upsampling_ratios
SCREAMING_SNAKE_CASE : Optional[int] = norm_type
SCREAMING_SNAKE_CASE : Any = kernel_size
SCREAMING_SNAKE_CASE : Union[str, Any] = last_kernel_size
SCREAMING_SNAKE_CASE : Tuple = residual_kernel_size
SCREAMING_SNAKE_CASE : Any = dilation_growth_rate
SCREAMING_SNAKE_CASE : Optional[int] = use_causal_conv
SCREAMING_SNAKE_CASE : str = pad_mode
SCREAMING_SNAKE_CASE : List[Any] = compress
SCREAMING_SNAKE_CASE : Optional[Any] = num_lstm_layers
SCREAMING_SNAKE_CASE : Dict = trim_right_ratio
SCREAMING_SNAKE_CASE : List[Any] = codebook_size
SCREAMING_SNAKE_CASE : Union[str, Any] = codebook_dim if codebook_dim is not None else hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
    f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`, got {self.norm_type}" )
super().__init__(**lowerCamelCase_ )
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __lowerCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
hop_length = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 18 | 1 |
"""simple docstring"""
def bubble_sort( list_data : list , length : int = 0 )-> list:
    '''simple docstring'''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
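Example runs of the recursive bubble sort above (toy data).

```python
print(bubble_sort([5, 2, 9, 1, 5, 6]))  # [1, 2, 5, 5, 6, 9]
print(bubble_sort([]))                  # [] (empty input is a no-op)
```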
| 18 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :str=7 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :int=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Optional[Any]=99 , lowerCamelCase_ :Any=36 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :str=4 , lowerCamelCase_ :Tuple=37 , lowerCamelCase_ :Optional[int]="gelu" , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :int=6 , lowerCamelCase_ :str=6 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :Tuple=10_00 , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : Tuple = text_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Dict = use_input_mask
SCREAMING_SNAKE_CASE : Any = use_token_type_ids
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = coordinate_size
SCREAMING_SNAKE_CASE : List[Any] = shape_size
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE : List[str] = scope
SCREAMING_SNAKE_CASE : Optional[int] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE : str = text_seq_length
SCREAMING_SNAKE_CASE : int = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE : Optional[Any] = self.text_seq_length + self.image_seq_length
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE : str = bbox[i, j, 3]
SCREAMING_SNAKE_CASE : List[str] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE : Any = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 2]
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = t
SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# text + image
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , pixel_values=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : int = LayoutLMvaForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
return True
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = LayoutLMvaModelTester(self )
        SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[str] , lowerCamelCase_ :str=False ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(lowerCamelCase_ )
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCamelCase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCamelCase_ , )
return inputs_dict
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : str = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def __A ( )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :str ) -> int:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase_ ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).pixel_values.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[1, 2]] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
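        # presumably 2 text tokens plus 197 visual tokens (196 patches + a CLS token
        # for a 224x224 input) yield the sequence length of 199 asserted below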
# forward pass
SCREAMING_SNAKE_CASE : Tuple = model(
input_ids=input_ids.to(lowerCamelCase_ ) , bbox=bbox.to(lowerCamelCase_ ) , pixel_values=pixel_values.to(lowerCamelCase_ ) , )
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 18 | 1 |
"""simple docstring"""
from __future__ import annotations
def __A ( a_ : list[int | float] , a_ : int , a_ : int )-> int | float:
'''simple docstring'''
if len(a_ ) == 0:
raise ValueError('''find_max() arg is an empty sequence''' )
if (
left >= len(a_ )
or left < -len(a_ )
or right >= len(a_ )
or right < -len(a_ )
):
raise IndexError('''list index out of range''' )
if left == right:
return nums[left]
SCREAMING_SNAKE_CASE : Dict = (left + right) >> 1 # the middle
SCREAMING_SNAKE_CASE : Union[str, Any] = find_max(a_ , a_ , a_ ) # find max in range[left, mid]
SCREAMING_SNAKE_CASE : List[Any] = find_max(a_ , mid + 1 , a_ ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
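# example: find_max([3, 1, 4, 1, 5], 0, 4) splits at mid=2, takes the max of the
# two halves recursively, and returns 5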
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 18 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
lowerCamelCase__ : Any = logging.getLogger(__name__)
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
UpperCamelCase = field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
UpperCamelCase = field(
default=10_24 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the training data."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the validation data."""} )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the test data."""} )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
SCREAMING_SNAKE_CASE : Optional[int] = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
UpperCamelCase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def __A ( )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(a_ )
datasets.utils.logging.set_verbosity(a_ )
transformers.utils.logging.set_verbosity(a_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
    # For JSON files, this script will use the `statement` column for the input statement and the `table_text` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
SCREAMING_SNAKE_CASE : Any = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
SCREAMING_SNAKE_CASE : List[Any] = data_args.train_file.split('''.''' )[-1]
SCREAMING_SNAKE_CASE : Optional[int] = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
SCREAMING_SNAKE_CASE : str = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
SCREAMING_SNAKE_CASE : int = load_dataset('''csv''' , data_files=a_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
SCREAMING_SNAKE_CASE : Tuple = load_dataset('''json''' , data_files=a_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
SCREAMING_SNAKE_CASE : str = raw_datasets['''train'''].features['''label'''].names
SCREAMING_SNAKE_CASE : Union[str, Any] = len(a_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
SCREAMING_SNAKE_CASE : Dict = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=a_ , )
SCREAMING_SNAKE_CASE : List[Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
SCREAMING_SNAKE_CASE : Optional[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
SCREAMING_SNAKE_CASE : Tuple = {'''Refused''': 0, '''Entailed''': 1}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
SCREAMING_SNAKE_CASE : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(a_ : str ):
# Tokenize the texts
def _convert_table_text_to_pandas(a_ : List[Any] ):
SCREAMING_SNAKE_CASE : List[Any] = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
SCREAMING_SNAKE_CASE : Dict = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
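            # e.g. "col1#col2\nval1#val2" parses into a DataFrame with columns
            # [col1, col2] and a single row [val1, val2]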
SCREAMING_SNAKE_CASE : List[Any] = examples['''statement''']
SCREAMING_SNAKE_CASE : Optional[int] = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
SCREAMING_SNAKE_CASE : Any = tokenizer(a_ , a_ , padding=a_ , max_length=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : List[Any] = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = raw_datasets.map(
a_ , batched=a_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : Any = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
SCREAMING_SNAKE_CASE : Tuple = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
SCREAMING_SNAKE_CASE : Optional[int] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(a_ ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(a_ : EvalPrediction ):
SCREAMING_SNAKE_CASE : str = p.predictions[0] if isinstance(p.predictions , a_ ) else p.predictions
SCREAMING_SNAKE_CASE : Tuple = np.argmax(a_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = default_data_collator
elif training_args.fpaa:
SCREAMING_SNAKE_CASE : Union[str, Any] = DataCollatorWithPadding(a_ , pad_to_multiple_of=8 )
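        # padding to a multiple of 8 lets fp16 matrix kernels use tensor cores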
else:
SCREAMING_SNAKE_CASE : List[Any] = None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Optional[Any] = Trainer(
model=a_ , args=a_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=a_ , tokenizer=a_ , data_collator=a_ , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : List[str] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : str = last_checkpoint
SCREAMING_SNAKE_CASE : str = trainer.train(resume_from_checkpoint=a_ )
SCREAMING_SNAKE_CASE : Optional[int] = train_result.metrics
SCREAMING_SNAKE_CASE : int = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ )
)
SCREAMING_SNAKE_CASE : Optional[int] = min(a_ , len(a_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , a_ )
trainer.save_metrics('''train''' , a_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE : Tuple = trainer.evaluate(eval_dataset=a_ )
SCREAMING_SNAKE_CASE : str = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = min(a_ , len(a_ ) )
trainer.log_metrics('''eval''' , a_ )
trainer.save_metrics('''eval''' , a_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
SCREAMING_SNAKE_CASE : Optional[Any] = predict_dataset.remove_columns('''label''' )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.predict(a_ , metric_key_prefix='''predict''' ).predictions
SCREAMING_SNAKE_CASE : Union[str, Any] = np.argmax(a_ , axis=1 )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(a_ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(a_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = label_list[item]
writer.write(F"{index}\t{item}\n" )
SCREAMING_SNAKE_CASE : Optional[int] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**a_ )
else:
trainer.create_model_card(**a_ )
def __A ( a_ : List[str] )-> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 18 | 1 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowercase__( TensorFormatter[Mapping, """torch.Tensor""", Mapping] ):
'''simple docstring'''
def __init__( self :str , lowerCamelCase_ :List[Any]=None , **lowerCamelCase_ :Any ) -> Any:
'''simple docstring'''
super().__init__(features=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Dict ) -> Any:
'''simple docstring'''
import torch
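        # stack into one tensor only when every element shares shape and dtype;
        # otherwise the column is returned as a plain list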
if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and column:
if all(
isinstance(lowerCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(lowerCamelCase_ )
return column
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
import torch
        if isinstance(lowerCamelCase_ , (str, bytes, type(None )) ):
return value
elif isinstance(lowerCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
SCREAMING_SNAKE_CASE : Optional[int] = {}
if isinstance(lowerCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
SCREAMING_SNAKE_CASE : Optional[Any] = {'''dtype''': torch.intaa}
elif isinstance(lowerCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
SCREAMING_SNAKE_CASE : Any = {'''dtype''': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(lowerCamelCase_ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE : Any = np.asarray(lowerCamelCase_ )
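        # dtypes passed via torch_tensor_kwargs take precedence over the inferred default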
return torch.tensor(lowerCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :int ) -> Union[str, Any]:
'''simple docstring'''
import torch
# support for torch, tf, jax etc.
if hasattr(lowerCamelCase_ , '''__array__''' ) and not isinstance(lowerCamelCase_ , torch.Tensor ):
SCREAMING_SNAKE_CASE : int = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(lowerCamelCase_ , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(lowerCamelCase_ , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(lowerCamelCase_ )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :dict ) -> Dict:
'''simple docstring'''
        return map_nested(self._recursive_tensorize , lowerCamelCase_ , map_list=False )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :pa.Table ) -> Mapping:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.numpy_arrow_extractor().extract_row(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.python_features_decoder.decode_row(lowerCamelCase_ )
return self.recursive_tensorize(lowerCamelCase_ )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :pa.Table ) -> "torch.Tensor":
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.numpy_arrow_extractor().extract_column(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.python_features_decoder.decode_column(lowerCamelCase_ , pa_table.column_names[0] )
SCREAMING_SNAKE_CASE : List[str] = self.recursive_tensorize(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self._consolidate(lowerCamelCase_ )
return column
def __lowerCAmelCase ( self :str , lowerCamelCase_ :pa.Table ) -> Mapping:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.numpy_arrow_extractor().extract_batch(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = self.python_features_decoder.decode_batch(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.recursive_tensorize(lowerCamelCase_ )
for column_name in batch:
SCREAMING_SNAKE_CASE : Optional[Any] = self._consolidate(batch[column_name] )
return batch
| 18 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any=13 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :int=[1, 2, 1] , lowerCamelCase_ :str=[2, 2, 4] , lowerCamelCase_ :str=2 , lowerCamelCase_ :Tuple=2.0 , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :Optional[int]=0.0 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :str=False , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :List[Any]=1E-5 , lowerCamelCase_ :int=True , lowerCamelCase_ :str=None , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Union[str, Any]=10 , lowerCamelCase_ :List[Any]=8 , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Any = embed_dim
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : List[str] = num_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = window_size
SCREAMING_SNAKE_CASE : Optional[Any] = mlp_ratio
SCREAMING_SNAKE_CASE : List[Any] = qkv_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = drop_path_rate
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Any = patch_norm
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : List[Any] = scope
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_stride
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
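        # each of the len(depths) - 1 patch-merging stages quarters the token count
        # (2x downsampling per spatial dim) and doubles the channel width, which is
        # what the expected_seq_len / expected_dim formulas below encode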
SCREAMING_SNAKE_CASE : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : List[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SwinvaModelTester(self )
        SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=SwinvaConfig , embed_dim=37 )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
SCREAMING_SNAKE_CASE : Tuple = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Optional[int] = config.window_size**2
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
SCREAMING_SNAKE_CASE : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
SCREAMING_SNAKE_CASE : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# Swinv2 has a different seq_length
SCREAMING_SNAKE_CASE : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
SCREAMING_SNAKE_CASE : Any = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = reshaped_hidden_states[0].shape
SCREAMING_SNAKE_CASE : Optional[int] = (
reshaped_hidden_states[0].view(lowerCamelCase_ , lowerCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
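        # bump each spatial dim past a patch boundary so the test also covers
        # input sizes that are not an exact multiple of the patch size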
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 18 | 1 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowerCamelCase__ : Dict = "true"
def __A ( a_ : Dict , a_ : Optional[Any]=82 , a_ : List[str]=16 )-> Union[str, Any]:
'''simple docstring'''
set_seed(42 )
SCREAMING_SNAKE_CASE : Optional[Any] = RegressionModel()
SCREAMING_SNAKE_CASE : Optional[Any] = deepcopy(a_ )
SCREAMING_SNAKE_CASE : int = RegressionDataset(length=a_ )
SCREAMING_SNAKE_CASE : str = DataLoader(a_ , batch_size=a_ )
model.to(accelerator.device )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = accelerator.prepare(a_ , a_ )
return model, ddp_model, dataloader
def __A ( a_ : Accelerator , a_ : Tuple=False )-> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
SCREAMING_SNAKE_CASE : int = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(a_ : List[Any] ):
SCREAMING_SNAKE_CASE : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=a_ , max_length=a_ )
return outputs
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE : int = dataset.map(
a_ , batched=a_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
SCREAMING_SNAKE_CASE : Optional[int] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(a_ : Dict ):
if use_longest:
return tokenizer.pad(a_ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(a_ , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' )
return DataLoader(a_ , shuffle=a_ , collate_fn=a_ , batch_size=16 )
def __A ( a_ : Tuple , a_ : List[str] )-> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = Accelerator(dispatch_batches=a_ , split_batches=a_ )
SCREAMING_SNAKE_CASE : Dict = get_dataloader(a_ , not dispatch_batches )
SCREAMING_SNAKE_CASE : str = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=a_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = accelerator.prepare(a_ , a_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __A ( a_ : Tuple , a_ : Tuple , a_ : List[str] )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = []
for batch in dataloader:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = batch.values()
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(a_ )
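            # gather_for_metrics collects predictions from all processes and drops the
            # duplicate samples added to pad the last uneven batch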
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = [], []
for logit, targ in logits_and_targets:
logits.append(a_ )
targs.append(a_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(a_ ), torch.cat(a_ )
return logits, targs
def __A ( a_ : Accelerator , a_ : int=82 , a_ : List[Any]=False , a_ : Union[str, Any]=False , a_ : Any=16 )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = get_basic_setup(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = generate_predictions(a_ , a_ , a_ )
assert (
len(a_ ) == num_samples
), F"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(a_ )}"
def __A ( a_ : bool = False , a_ : bool = False )-> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = evaluate.load('''glue''' , '''mrpc''' )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = get_mrpc_setup(a_ , a_ )
# First do baseline
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = setup['''no''']
model.to(a_ )
model.eval()
for batch in dataloader:
batch.to(a_ )
with torch.inference_mode():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**a_ )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=a_ , references=batch['''labels'''] )
SCREAMING_SNAKE_CASE : Dict = metric.compute()
# Then do distributed
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
SCREAMING_SNAKE_CASE : List[str] = model(**a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE : Dict = batch['''labels''']
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=a_ , references=a_ )
SCREAMING_SNAKE_CASE : int = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def __A ( )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = Accelerator(split_batches=a_ , dispatch_batches=a_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" )
test_mrpc(a_ , a_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
SCREAMING_SNAKE_CASE : Optional[int] = Accelerator(split_batches=a_ , dispatch_batches=a_ )
if accelerator.is_local_main_process:
print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" )
test_torch_metrics(a_ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
SCREAMING_SNAKE_CASE : Any = Accelerator()
test_torch_metrics(a_ , 5_12 )
accelerator.state._reset_state()
def __A ( a_ : List[str] )-> List[Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 18 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """blenderbot-small"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :Any , lowerCamelCase_ :Dict=5_02_65 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :Tuple=8 , lowerCamelCase_ :int=20_48 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=8 , lowerCamelCase_ :str=20_48 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :Tuple=0.0 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Dict=False , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=2 , **lowerCamelCase_ :Dict , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = d_model
SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Tuple = encoder_layers
SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Any = activation_dropout
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , forced_eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            # past keys/values are (batch, num_heads, seq_len, head_dim) per layer
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
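

# Hedged usage sketch (added; not part of the original file). The module uses relative
# imports, so this guard only runs inside the `transformers` package, but it shows how
# the config's `attribute_map` aliases generic names onto the BART-style attributes:
if __name__ == "__main__":
    config = BlenderbotSmallConfig(encoder_layers=2, decoder_layers=2, d_model=64)
    assert config.hidden_size == config.d_model == 64        # via attribute_map
    assert config.num_attention_heads == config.encoder_attention_heads
    print(config.num_hidden_layers)  # 2, mirrors encoder_layers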
| 18 | 1 |
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    """Sorts `collection` in place using patience sort and returns it."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
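

# Worked example (added for illustration): for [5, 1, 4, 2, 3] the elements are dealt
# onto piles whose tops stay sorted -- [5, 1], [4, 2], [3] -- and the heap-merge of
# the reversed piles yields the sorted result:
#
#     >>> patience_sort([5, 1, 4, 2, 3])
#     [1, 2, 3, 4, 5]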
if __name__ == "__main__":
lowerCamelCase__ : int = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ : str = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
| 18 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
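

# Hedged sketch (added): `compute_effective_axis_dimension`, imported above and used by
# the ONNX config below, resolves dynamic axes (-1) to fixed dummy sizes. Its behaviour
# is roughly the following; the exact implementation lives in `transformers.onnx.utils`:
#
#     def compute_effective_axis_dimension(dimension, fixed_dimension, num_token_to_add=0):
#         if dimension <= 0:  # dynamic axis: fall back to the fixed default
#             dimension = fixed_dimension
#         # leave room for the special tokens the tokenizer will add back
#         return dimension - num_token_to_add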
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40) -> Mapping[str, Any]:
        # A dummy image is supplied below, so OCR on the image processor is disabled
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # Generate dummy images
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
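

# Hedged usage sketch (added; not part of the original file). With the released
# `transformers` package, this config is typically driven through the ONNX export
# API; roughly:
#
#     from pathlib import Path
#     from transformers import AutoProcessor, LayoutLMv3Model
#     from transformers.onnx import export
#
#     processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#     model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
#     onnx_config = LayoutLMv3OnnxConfig(model.config)
#     export(processor, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))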
| 18 | 1 |
"""simple docstring"""
import numpy as np
def __A ( a_ : np.array )-> np.array:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
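

# Added for illustration (not in the original module): the sigmoid derivative has the
# closed form s'(x) = s(x) * (1 - s(x)), so backpropagation can reuse the forward value.
def sigmoid_derivative(vector: np.ndarray) -> np.ndarray:
    """
    >>> sigmoid_derivative(np.array([0.0]))
    array([0.25])
    """
    s = sigmoid(vector)
    return s * (1 - s)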
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
"""simple docstring"""
import math
def __A ( a_ : list , a_ : int )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = len(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = int(math.floor(math.sqrt(a_ ) ) )
SCREAMING_SNAKE_CASE : List[str] = 0
while arr[min(a_ , a_ ) - 1] < x:
SCREAMING_SNAKE_CASE : Optional[Any] = step
step += int(math.floor(math.sqrt(a_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
SCREAMING_SNAKE_CASE : Any = prev + 1
if prev == min(a_ , a_ ):
return -1
if arr[prev] == x:
return prev
return -1
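

# Added note (illustration, not in the original module): jump search makes at most
# ~sqrt(n) block jumps plus ~sqrt(n) linear steps, i.e. O(sqrt(n)) comparisons on a
# sorted array -- between a linear scan and binary search. Quick sanity check:
#
#     >>> jump_search([0, 1, 3, 5, 8, 13, 21, 34, 55], 21)
#     6
#     >>> jump_search([0, 1, 3, 5, 8, 13, 21, 34, 55], 4)
#     -1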
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ : List[str] = [int(item) for item in user_input.split(",")]
lowerCamelCase__ : Dict = int(input("Enter the number to be searched:\n"))
lowerCamelCase__ : Tuple = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f'''Number {x} is at index {res}''')
| 18 | 1 |