| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86 to 54.5k | int64 0 to 371 | stringlengths 87 to 49.2k | int64 0 to 349 | int64 0 to 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
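# Map each submodule to the public names it defines. _LazyModule consumes this
# table so that the heavy backends (torch/TF/Flax) are only imported when one
# of their classes is actually accessed.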
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T  # TF stores linear layers transposed relative to torch
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
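# Illustrative invocation (the script filename and checkpoint path are
# placeholders mirroring the default above, not mandated by this file):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc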
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared length of a vector (its dot product with itself)."""
    return np.dot(vector, vector)
class SVC:
    """Support Vector Classifier for binary classification (labels in {-1, 1})."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
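    # Hedged usage sketch (illustrative toy data, not part of the original file):
    # fit a hard-margin linear SVC on four separable 2D points, then classify.
    xs = [
        np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
        np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0]),
    ]
    ys = np.asarray([1, 1, -1, -1])
    svc = SVC(kernel="linear")
    svc.fit(xs, ys)
    print(svc.predict(np.asarray([0.0, 1.5])))  # expected: 1
    print(svc.predict(np.asarray([1.0, 1.5])))  # expected: -1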
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_loop(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
def price_plus_tax(price: float, tax_rate: float) -> float:
    """
    >>> price_plus_tax(100, 0.25)
    125.0
    >>> price_plus_tax(125.50, 0.05)
    131.775
    """
    return price * (1 + tax_rate)
if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
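# Instantiating BeitFeatureExtractor still works, but emits the FutureWarning
# above; new code should construct BeitImageProcessor directly with the same
# arguments.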
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    """
    Return the bitwise OR of two non-negative integers as a binary string.

    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(37, 50)
    '0b110111'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_pretrained_model_and_inputs(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
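# Illustrative invocations (flags shown are assumptions about the installed
# accelerate CLI, not guaranteed by this module alone):
#   accelerate config                 # interactive questionnaire
#   accelerate config update          # rewrite an existing config file in place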
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
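if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): run the tool on a
    # local image. "cat.png" is a placeholder path; the checkpoint is downloaded
    # on first use by PipelineTool.
    tool = ImageSegmentationTool()
    mask = tool(image=Image.open("cat.png"), label="cat")
    mask.save("cat_mask.png")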
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
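if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): compose a Blip2Config
    # from default sub-configs, mirroring what from_vision_qformer_text_configs does.
    vision_config = Blip2VisionConfig()
    qformer_config = Blip2QFormerConfig()
    config = Blip2Config(vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict())
    print(config.num_query_tokens)  # 32 by default
    print(config.text_config.model_type)  # falls back to "opt" when text_config is None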
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    # standard COCO test image used across conversion scripts
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
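# Example invocation (the script filename is an assumption, not given here;
# the checkpoint URL is this script's default):
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large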
| 163 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
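# Minimal usage sketch (assumes the `transformers` package is installed): the
# "-"-split in __init__ means a "gated-gelu" projection enables the gated
# variant and, for backwards compatibility, maps the activation to "gelu_new".
#
#   cfg = T5Config(feed_forward_proj="gated-gelu")
#   assert cfg.is_gated_act and cfg.dense_act_fn == "gelu_new"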
| 163 | 1 |
def solution(n: int = 1000) -> int:
    """Return the largest product a*b*c over Pythagorean triplets (a, b, c)
    with perimeter a + b + c == n, or -1 if no such triplet exists."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
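# Sanity-check sketch (the helper name is ours, not from the original): a brute
# force over all perimeter-n triplets should agree with the closed-form search
# above, e.g. _brute_force_solution(120) == solution(120) == 60000 for the
# (30, 40, 50) triplet.
def _brute_force_solution(n: int = 120) -> int:
    best = -1
    for a in range(1, n):
        for b in range(a, n - a):
            c = n - a - b
            if c > 0 and a * a + b * b == c * c:
                best = max(best, a * b * c)
    return best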
if __name__ == "__main__":
print(F"{solution() = }")
| 21 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        # Rank passages by relevance, then extract the best answer spans.
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        # Score every candidate (start, end) pair, then greedily keep the
        # top non-overlapping spans.
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
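# Hedged usage sketch (downloads the reader vocab on first use; the strings
# are illustrative): the reader tokenizer packs question/title/text triples
# into [CLS] question [SEP] title [SEP] text sequences.
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions=["What is the capital of France?"],
#       titles=["Paris"],
#       texts=["Paris is the capital and most populous city of France."],
#       return_tensors="pt",
#   )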
| 21 | 1 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # heuristic: how many raw characters to buffer before tokenizing
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    # relies on the module-level `tokenizer` defined below
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
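# Example launch (the script filename and values are illustrative; argument
# names follow EvaluationArguments):
#
#   accelerate launch validation_loss.py --model_ckpt codeparrot/codeparrot --batch_size 8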
| 117 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" BERT tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
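# Hedged usage sketch (downloads the vocab on first use):
#
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   encoded = tokenizer("Hello world")
#   assert encoded["input_ids"][0] == tokenizer.cls_token_id  # [CLS] is prepended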
| 117 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
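# Example invocation (paths are illustrative; the script filename is an
# assumption):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert_ckpt \
#       --config_file ./lxmert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin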
| 213 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """
        Checks whether there might be something wrong with given input with regard to the model.
        """
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. They should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """
        Checks whether there might be something wrong with given input with regard to the model.
        """
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be smaller than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
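# Hedged usage sketch (model weights are downloaded on first use and the exact
# output text depends on the default checkpoint):
#
#   from transformers import pipeline
#   summarizer = pipeline("summarization")
#   print(summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20))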
| 213 | 1 |
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # build a HF-key -> SD-key mapping, then re-key the state dict
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv weights by adding two unit dims
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    # v1 text encoders need no key remapping
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"

    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
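# Example invocation (the script filename is an assumption; paths are
# illustrative):
#
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model --checkpoint_path ./model.ckpt --half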
| 362 |
'''simple docstring'''
def UpperCAmelCase_ ( __lowercase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
_UpperCAmelCase = set()
# Replace all the whitespace in our sentence
_UpperCAmelCase = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(__lowercase ) == 26
def UpperCAmelCase_ ( __lowercase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
_UpperCAmelCase = [False] * 26
for char in input_str:
if char.islower():
_UpperCAmelCase = True
elif char.isupper():
_UpperCAmelCase = True
return all(__lowercase )
def UpperCAmelCase_ ( __lowercase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def UpperCAmelCase_ ( ) -> None:
'''simple docstring'''
from timeit import timeit
_UpperCAmelCase = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=__lowercase ) )
print(timeit("is_pangram_faster()" , setup=__lowercase ) )
print(timeit("is_pangram_fastest()" , setup=__lowercase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
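# Illustrative reference implementation (an addition): the set-based check in
# is_pangram_fastest is equivalent to a subset test against string.ascii_lowercase.
import string
def is_pangram_reference(sentence: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """
    >>> is_pangram_reference()
    True
    >>> is_pangram_reference("Hello world")
    False
    """
    return set(string.ascii_lowercase) <= {char for char in sentence.lower() if char.isalpha()}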
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 156 | 0 |
# Function to print upper half of diamond (pyramid)
def floyd(n) -> None:
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ): # printing spaces
            print(' ' , end='' )
        for _ in range(0 , i + 1 ): # printing stars
            print('* ' , end='' )
        print()
def reverse_floyd(n) -> None:
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ): # printing stars
            print('* ' , end='' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
            print(' ' , end='' )
def pretty_print(n) -> None:
    if n <= 0:
        print(' ... .... nothing printing :(' )
        return
    floyd(n ) # upper half
    reverse_floyd(n ) # lower half
if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
| 21 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
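# Minimal sketch (an addition; an illustration, not transformers' actual
# _LazyModule) of the deferred-import pattern used above: attribute access
# imports the owning submodule on first use and caches the result.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, package, import_structure):
        super().__init__(name)
        self._package = package
        # invert {submodule: [exported names]} into {name: submodule}
        self._name_to_submodule = {
            attr: sub for sub, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._name_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(attr)
        module = importlib.import_module(f".{submodule}", self._package)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ runs only once per name
        return value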
| 46 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester ( unittest.TestCase ):
def __init__( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : int=13 , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : int=True , _UpperCamelCase : List[str]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Tuple=99 , _UpperCamelCase : List[Any]=32 , _UpperCamelCase : Any=5 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : Optional[int]=37 , _UpperCamelCase : List[Any]="gelu" , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : int=512 , _UpperCamelCase : List[Any]=16 , _UpperCamelCase : List[Any]=2 , _UpperCamelCase : Dict=0.0_2 , _UpperCamelCase : str=4 , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_attention_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_choices
def __snake_case( self : int ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __snake_case( self : Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
lowercase__ : str = True
lowercase__ : str = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __snake_case( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = FlaxRoFormerModelTester(self )
@slow
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=_a )
SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
@require_flax
class FlaxRoFormerModelIntegrationTest ( unittest.TestCase ):
@slow
def __snake_case( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
SCREAMING_SNAKE_CASE = jnp.array([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE = model(_a )[0]
SCREAMING_SNAKE_CASE = 50_000
SCREAMING_SNAKE_CASE = (1, 6, vocab_size)
self.assertEqual(output.shape , _a )
SCREAMING_SNAKE_CASE = jnp.array(
[[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
| 361 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera :
lowercase__ : torch.Tensor # [batch_size x 3]
lowercase__ : torch.Tensor # [batch_size x 3]
lowercase__ : torch.Tensor # [batch_size x 3]
lowercase__ : torch.Tensor # [batch_size x 3]
lowercase__ : int
lowercase__ : int
lowercase__ : float
lowercase__ : float
lowercase__ : Tuple[int]
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __snake_case( self : int ) -> str:
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def __snake_case( self : Tuple ) -> List[str]:
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def __snake_case( self : Any ) -> torch.Tensor:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.arange(self.height * self.width )
SCREAMING_SNAKE_CASE = torch.stack(
[
pixel_indices % self.width,
torch.div(_UpperCamelCase , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def __snake_case( self : Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE = self.shape
SCREAMING_SNAKE_CASE = int(np.prod(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = self.get_image_coords()
SCREAMING_SNAKE_CASE = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
SCREAMING_SNAKE_CASE = self.get_camera_rays(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rays.view(_UpperCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __snake_case( self : Optional[int] , _UpperCamelCase : torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
SCREAMING_SNAKE_CASE = coords.view(_UpperCamelCase , -1 , 2 )
SCREAMING_SNAKE_CASE = self.resolution()
SCREAMING_SNAKE_CASE = self.fov()
SCREAMING_SNAKE_CASE = (flat.float() / (res - 1)) * 2 - 1
SCREAMING_SNAKE_CASE = fracs * torch.tan(fov / 2 )
SCREAMING_SNAKE_CASE = fracs.view(_UpperCamelCase , -1 , 2 )
SCREAMING_SNAKE_CASE = (
self.z.view(_UpperCamelCase , 1 , 3 )
+ self.x.view(_UpperCamelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(_UpperCamelCase , 1 , 3 ) * fracs[:, :, 1:]
)
SCREAMING_SNAKE_CASE = directions / directions.norm(dim=-1 , keepdim=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.stack(
[
torch.broadcast_to(self.origin.view(_UpperCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(_UpperCamelCase , *_UpperCamelCase , 2 , 3 )
def __snake_case( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : int ) -> "DifferentiableProjectiveCamera":
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=_UpperCamelCase , height=_UpperCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def __lowerCamelCase (UpperCAmelCase__ : int ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
SCREAMING_SNAKE_CASE = np.array([np.sin(UpperCAmelCase__ ), np.cos(UpperCAmelCase__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
SCREAMING_SNAKE_CASE = -z * 4
SCREAMING_SNAKE_CASE = np.array([np.cos(UpperCAmelCase__ ), -np.sin(UpperCAmelCase__ ), 0.0] )
SCREAMING_SNAKE_CASE = np.cross(UpperCAmelCase__ , UpperCAmelCase__ )
origins.append(UpperCAmelCase__ )
xs.append(UpperCAmelCase__ )
ys.append(UpperCAmelCase__ )
zs.append(UpperCAmelCase__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(UpperCAmelCase__ , axis=0 ) ).float() , width=UpperCAmelCase__ , height=UpperCAmelCase__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(UpperCAmelCase__ )) , )
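# Self-contained sketch (an addition; the names here are illustrative) of the
# per-pixel ray math used above: map pixel coordinates to [-1, 1], scale by
# tan(fov / 2), combine with the camera basis vectors, and normalize.
def pixel_to_ray_direction(px, py, width, height, x_fov, y_fov, x_axis, y_axis, z_axis):
    fx = (px / (width - 1)) * 2 - 1
    fy = (py / (height - 1)) * 2 - 1
    fx = fx * np.tan(x_fov / 2)
    fy = fy * np.tan(y_fov / 2)
    direction = z_axis + x_axis * fx + y_axis * fy
    return direction / np.linalg.norm(direction)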
| 206 | 0 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _snake_case ( a__ , a__ , a__ ):
lowerCAmelCase :List[str] = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = 5_0257 , _lowerCamelCase = 1024 , _lowerCamelCase = 768 , _lowerCamelCase = 12 , _lowerCamelCase = 12 , _lowerCamelCase = None , _lowerCamelCase = "gelu_new" , _lowerCamelCase = 0.1 , _lowerCamelCase = 0.1 , _lowerCamelCase = 0.1 , _lowerCamelCase = 1e-5 , _lowerCamelCase = 0.02 , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = False , _lowerCamelCase = False , ):
super().__init__()
UpperCAmelCase__ : Optional[Any] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
                f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''')
UpperCAmelCase__ : str = prefix_inner_dim
UpperCAmelCase__ : Tuple = prefix_hidden_dim
UpperCAmelCase__ : Any = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim)
if self.prefix_hidden_dim is not None
else nn.Identity()
)
UpperCAmelCase__ : Optional[Any] = (
nn.Linear(self.prefix_hidden_dim , _lowerCamelCase) if self.prefix_hidden_dim is not None else nn.Identity()
)
UpperCAmelCase__ : Optional[Any] = GPTaConfig(
vocab_size=_lowerCamelCase , n_positions=_lowerCamelCase , n_embd=_lowerCamelCase , n_layer=_lowerCamelCase , n_head=_lowerCamelCase , n_inner=_lowerCamelCase , activation_function=_lowerCamelCase , resid_pdrop=_lowerCamelCase , embd_pdrop=_lowerCamelCase , attn_pdrop=_lowerCamelCase , layer_norm_epsilon=_lowerCamelCase , initializer_range=_lowerCamelCase , scale_attn_weights=_lowerCamelCase , use_cache=_lowerCamelCase , scale_attn_by_inverse_layer_idx=_lowerCamelCase , reorder_and_upcast_attn=_lowerCamelCase , )
UpperCAmelCase__ : int = GPTaLMHeadModel(_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , ):
UpperCAmelCase__ : Dict = self.transformer.transformer.wte(_lowerCamelCase)
UpperCAmelCase__ : Any = self.encode_prefix(_lowerCamelCase)
UpperCAmelCase__ : Tuple = self.decode_prefix(_lowerCamelCase)
UpperCAmelCase__ : List[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1)
if labels is not None:
UpperCAmelCase__ : str = self.get_dummy_token(input_ids.shape[0] , input_ids.device)
UpperCAmelCase__ : Tuple = torch.cat((dummy_token, input_ids) , dim=1)
UpperCAmelCase__ : Union[str, Any] = self.transformer(inputs_embeds=_lowerCamelCase , labels=_lowerCamelCase , attention_mask=_lowerCamelCase)
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase):
return torch.zeros(_lowerCamelCase , self.prefix_length , dtype=torch.intaa , device=_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase):
return self.encode_prefix(_lowerCamelCase)
@torch.no_grad()
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Dict = torch.split(_lowerCamelCase , 1 , dim=0)
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Any = []
for feature in features:
UpperCAmelCase__ : int = self.decode_prefix(feature.to(_lowerCamelCase)) # back to the clip feature
# Only support beam search for now
UpperCAmelCase__ , UpperCAmelCase__ : str = self.generate_beam(
input_embeds=_lowerCamelCase , device=_lowerCamelCase , eos_token_id=_lowerCamelCase)
generated_tokens.append(output_tokens[0])
generated_seq_lengths.append(seq_lengths[0])
UpperCAmelCase__ : List[Any] = torch.stack(_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = torch.stack(_lowerCamelCase)
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def snake_case__ ( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = 5 , _lowerCamelCase = 67 , _lowerCamelCase = 1.0 , _lowerCamelCase = None , ):
UpperCAmelCase__ : Dict = eos_token_id
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Any = torch.ones(_lowerCamelCase , device=_lowerCamelCase , dtype=torch.int)
UpperCAmelCase__ : Any = torch.zeros(_lowerCamelCase , device=_lowerCamelCase , dtype=torch.bool)
if input_embeds is not None:
UpperCAmelCase__ : Optional[int] = input_embeds
else:
UpperCAmelCase__ : Any = self.transformer.transformer.wte(_lowerCamelCase)
for i in range(_lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = self.transformer(inputs_embeds=_lowerCamelCase)
UpperCAmelCase__ : List[str] = outputs.logits
UpperCAmelCase__ : List[str] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
UpperCAmelCase__ : int = logits.softmax(-1).log()
if scores is None:
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = logits.topk(_lowerCamelCase , -1)
UpperCAmelCase__ : int = generated.expand(_lowerCamelCase , *generated.shape[1:])
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = next_tokens.permute(1 , 0), scores.squeeze(0)
if tokens is None:
UpperCAmelCase__ : Any = next_tokens
else:
UpperCAmelCase__ : Tuple = tokens.expand(_lowerCamelCase , *tokens.shape[1:])
UpperCAmelCase__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1)
else:
UpperCAmelCase__ : Any = -float(np.inf)
UpperCAmelCase__ : Union[str, Any] = 0
UpperCAmelCase__ : Any = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
UpperCAmelCase__ : Optional[Any] = scores_sum / seq_lengths[:, None]
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = scores_sum_average.view(-1).topk(_lowerCamelCase , -1)
UpperCAmelCase__ : str = next_tokens // scores_sum.shape[1]
UpperCAmelCase__ : Optional[int] = seq_lengths[next_tokens_source]
UpperCAmelCase__ : List[str] = next_tokens % scores_sum.shape[1]
UpperCAmelCase__ : List[Any] = next_tokens.unsqueeze(1)
UpperCAmelCase__ : Dict = tokens[next_tokens_source]
UpperCAmelCase__ : Optional[int] = torch.cat((tokens, next_tokens) , dim=1)
UpperCAmelCase__ : Optional[Any] = generated[next_tokens_source]
UpperCAmelCase__ : List[Any] = scores_sum_average * seq_lengths
UpperCAmelCase__ : Union[str, Any] = is_stopped[next_tokens_source]
UpperCAmelCase__ : Union[str, Any] = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0] , 1 , -1)
UpperCAmelCase__ : Union[str, Any] = torch.cat((generated, next_token_embed) , dim=1)
UpperCAmelCase__ : Union[str, Any] = is_stopped + next_tokens.eq(_lowerCamelCase).squeeze()
if is_stopped.all():
break
UpperCAmelCase__ : Tuple = scores / seq_lengths
UpperCAmelCase__ : Union[str, Any] = scores.argsort(descending=_lowerCamelCase)
# tokens tensors are already padded to max_seq_length
UpperCAmelCase__ : Optional[Any] = [tokens[i] for i in order]
UpperCAmelCase__ : Optional[Any] = torch.stack(_lowerCamelCase , dim=0)
UpperCAmelCase__ : Dict = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype)
return output_texts, seq_lengths
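# Illustrative alignment check (an addition; the sizes are arbitrary): the
# zero "dummy tokens" generated above pad the labels so they line up with the
# concatenation [prefix embeddings | token embeddings] on the sequence axis.
def _demo_dummy_token_alignment():
    batch_size, prefix_length, seq_length = 2, 10, 7
    dummy = torch.zeros(batch_size, prefix_length, dtype=torch.int64)
    input_ids = torch.randint(1, 100, (batch_size, seq_length))
    labels = torch.cat((dummy, input_ids), dim=1)
    assert labels.shape == (batch_size, prefix_length + seq_length)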
| 163 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image ( ):
UpperCAmelCase__ : Any = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
UpperCAmelCase__ : Optional[int] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert("""RGB""" )
return image
def create_rename_keys ( config ):
UpperCAmelCase__ : int = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key ( dct , old , new ):
UpperCAmelCase__ : Union[str, Any] = dct.pop(UpperCamelCase__ )
UpperCAmelCase__ : Dict = val
def read_in_q_v_bias ( state_dict , config ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase__ : List[Any] = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase__ : Any = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCAmelCase__ : Union[str, Any] = torch.cat((q_bias, torch.zeros_like(UpperCamelCase__ , requires_grad=UpperCamelCase__ ), v_bias) )
UpperCAmelCase__ : Tuple = qkv_bias
def get_blipa_config ( model_name , eos_token_id ):
UpperCAmelCase__ : Optional[int] = 3_6_4 if """coco""" in model_name else 2_2_4
UpperCAmelCase__ : int = BlipaVisionConfig(image_size=UpperCamelCase__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCAmelCase__ : str = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=UpperCamelCase__ ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase__ : List[Any] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=UpperCamelCase__ ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase__ : Dict = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase__ : Optional[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
UpperCAmelCase__ : int = BlipaConfig(vision_config=UpperCamelCase__ , text_config=UpperCamelCase__ )
return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint ( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
UpperCAmelCase__ : Tuple = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
UpperCAmelCase__ : int = tokenizer("""\n""" , add_special_tokens=UpperCamelCase__ ).input_ids[0]
UpperCAmelCase__ , UpperCAmelCase__ : Any = get_blipa_config(UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
UpperCAmelCase__ : List[str] = BlipaForConditionalGeneration(UpperCamelCase__ ).eval()
UpperCAmelCase__ : int = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
UpperCAmelCase__ : Union[str, Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = load_model_and_preprocess(
name=UpperCamelCase__ , model_type=UpperCamelCase__ , is_eval=UpperCamelCase__ , device=UpperCamelCase__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
UpperCAmelCase__ : List[Any] = original_model.state_dict()
UpperCAmelCase__ : Union[str, Any] = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase__ : str = state_dict.pop(UpperCamelCase__ )
if key.startswith("""Qformer.bert""" ):
UpperCAmelCase__ : Any = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
UpperCAmelCase__ : Dict = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
UpperCAmelCase__ : Any = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
UpperCAmelCase__ : int = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
UpperCAmelCase__ : Optional[int] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
UpperCAmelCase__ : int = key.replace("""t5""" , """language""" )
UpperCAmelCase__ : List[str] = val
# read in qv biases
read_in_q_v_bias(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ : Any = hf_model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase__ : List[Any] = load_demo_image()
UpperCAmelCase__ : Any = vis_processors["""eval"""](UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
UpperCAmelCase__ : Any = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(UpperCamelCase__ )
# create processor
UpperCAmelCase__ : int = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ )
UpperCAmelCase__ : Any = BlipaProcessor(image_processor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
UpperCAmelCase__ : Tuple = processor(images=UpperCamelCase__ , return_tensors="""pt""" ).pixel_values.to(UpperCamelCase__ )
# make sure processor creates exact same pixel values
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ )
original_model.to(UpperCamelCase__ )
hf_model.to(UpperCamelCase__ )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase__ : List[str] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
UpperCAmelCase__ : Union[str, Any] = hf_model(UpperCamelCase__ , UpperCamelCase__ ).logits
else:
UpperCAmelCase__ : List[str] = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
UpperCAmelCase__ : Any = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase__ : Optional[Any] = hf_model(UpperCamelCase__ , UpperCamelCase__ , labels=UpperCamelCase__ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase__ : Any = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=UpperCamelCase__ )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase__ : int = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=UpperCamelCase__ )
else:
# cast to same type
UpperCAmelCase__ : int = logits.dtype
assert torch.allclose(original_logits.to(UpperCamelCase__ ) , UpperCamelCase__ , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
UpperCAmelCase__ : Union[str, Any] = """"""
UpperCAmelCase__ : Dict = tokenizer(UpperCamelCase__ , return_tensors="""pt""" ).input_ids.to(UpperCamelCase__ )
UpperCAmelCase__ : int = original_model.generate({"""image""": original_pixel_values} )
UpperCAmelCase__ : Optional[Any] = hf_model.generate(
UpperCamelCase__ , UpperCamelCase__ , do_sample=UpperCamelCase__ , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , UpperCamelCase__ )
UpperCAmelCase__ : List[Any] = input_ids.shape[1]
UpperCAmelCase__ : Optional[Any] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=UpperCamelCase__ )
UpperCAmelCase__ : Any = [text.strip() for text in output_text]
print("""HF generation:""" , UpperCamelCase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(UpperCamelCase__ )
hf_model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
__A =[
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
        help='Name of the BLIP-2 model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__A =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
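# --- Illustrative usage of a converted checkpoint (an addition) ---
# The repo id below is an assumption for the example; the un-mangled HF class
# names are Blip2Processor and Blip2ForConditionalGeneration.
def _demo_caption(image_path='example.jpg'):
    from PIL import Image
    from transformers import Blip2ForConditionalGeneration, Blip2Processor
    processor = Blip2Processor.from_pretrained('Salesforce/blip2-opt-2.7b')
    model = Blip2ForConditionalGeneration.from_pretrained('Salesforce/blip2-opt-2.7b')
    inputs = processor(images=Image.open(image_path), return_tensors='pt')
    generated = model.generate(**inputs, max_new_tokens=30)
    print(processor.batch_decode(generated, skip_special_tokens=True)[0].strip())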
| 163 | 1 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Any = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast ( tokenizer_name , checkpoint_name , dump_path , force_download ):
"""simple docstring"""
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
if tokenizer_name is None:
UpperCamelCase__ : Optional[int] = TOKENIZER_CLASSES
else:
UpperCamelCase__ : Optional[int] = {tokenizer_name: getattr(SCREAMING_SNAKE_CASE , tokenizer_name + '''Fast''' )}
logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
for tokenizer_name in tokenizer_names:
UpperCamelCase__ : Tuple = TOKENIZER_CLASSES[tokenizer_name]
UpperCamelCase__ : str = True
if checkpoint_name is None:
UpperCamelCase__ : Optional[Any] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
UpperCamelCase__ : Tuple = [checkpoint_name]
logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
for checkpoint in checkpoint_names:
logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
# Load tokenizer
UpperCamelCase__ : int = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE )
# Save fast tokenizer
logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
# For organization names we create sub-directories
if "/" in checkpoint:
UpperCamelCase__ : List[str] = checkpoint.split('''/''' )
UpperCamelCase__ : List[str] = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif add_prefix:
UpperCamelCase__ : Tuple = checkpoint
UpperCamelCase__ : List[Any] = dump_path
else:
UpperCamelCase__ : List[str] = None
UpperCamelCase__ : Tuple = dump_path
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
UpperCamelCase__ : Union[str, Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
UpperCamelCase__ : List[Any] = file_path.split(SCREAMING_SNAKE_CASE )[-1][0]
if next_char == "/":
UpperCamelCase__ : str = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = None
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
UpperCamelCase__ : int = tokenizer.save_pretrained(
SCREAMING_SNAKE_CASE , legacy_format=SCREAMING_SNAKE_CASE , filename_prefix=SCREAMING_SNAKE_CASE )
logger.info(F"=> File names {file_names}" )
for file_name in file_names:
if not file_name.endswith('''tokenizer.json''' ):
os.remove(SCREAMING_SNAKE_CASE )
logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
__UpperCamelCase : int = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
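# Illustrative parity check (an addition; "bert-base-uncased" is only an
# example id): a converted fast tokenizer should tokenize exactly like its
# slow counterpart.
def _demo_fast_slow_parity(model_id="bert-base-uncased"):
    from transformers import AutoTokenizer
    slow = AutoTokenizer.from_pretrained(model_id, use_fast=False)
    fast = AutoTokenizer.from_pretrained(model_id, use_fast=True)
    text = "Converting slow tokenizers to fast ones."
    assert slow.tokenize(text) == fast.tokenize(text)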
| 367 |
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup ( params , i , prefix , layer_name="attention" ):
    """simple docstring"""
    k = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def tax_mlp_lookup ( params , i , prefix , split_mlp_wi=False ):
    """simple docstring"""
    if split_mlp_wi:
        wi_a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_b = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_a, wi_b)
    else:
        wi = params[F"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[F"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def tax_layer_norm_lookup ( params , i , prefix , layer_name ):
    """simple docstring"""
    return params[F"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_tax_to_pytorch ( variables : dict , *, num_layers : int , is_encoder_only : bool ):
"""simple docstring"""
UpperCamelCase__ : List[Any] = traverse_util.flatten_dict(variables['''target'''] )
UpperCamelCase__ : List[str] = {'''/'''.join(SCREAMING_SNAKE_CASE ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCamelCase__ : List[Any] = '''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = collections.OrderedDict()
# Shared embeddings.
UpperCamelCase__ : List[Any] = old['''token_embedder/embedding''']
# Encoder.
for i in range(SCREAMING_SNAKE_CASE ):
# Block i, layer 0 (Self Attention).
UpperCamelCase__ : int = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''encoder''' , '''pre_attention_layer_norm''' )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = tax_attention_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''encoder''' , '''attention''' )
UpperCamelCase__ : Tuple = layer_norm
UpperCamelCase__ : Optional[int] = k.T
UpperCamelCase__ : Any = o.T
UpperCamelCase__ : Dict = q.T
UpperCamelCase__ : List[str] = v.T
# Block i, layer 1 (MLP).
UpperCamelCase__ : Tuple = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''encoder''' , '''pre_mlp_layer_norm''' )
UpperCamelCase__ , UpperCamelCase__ : Dict = tax_mlp_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''encoder''' , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = layer_norm
if split_mlp_wi:
UpperCamelCase__ : Optional[int] = wi[0].T
UpperCamelCase__ : Tuple = wi[1].T
else:
UpperCamelCase__ : List[Any] = wi.T
UpperCamelCase__ : Optional[int] = wo.T
UpperCamelCase__ : List[str] = old[
'''encoder/relpos_bias/rel_embedding'''
].T
UpperCamelCase__ : str = old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(SCREAMING_SNAKE_CASE ):
# Block i, layer 0 (Self Attention).
UpperCamelCase__ : List[Any] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , '''pre_self_attention_layer_norm''' )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Dict = tax_attention_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , '''self_attention''' )
UpperCamelCase__ : Dict = layer_norm
UpperCamelCase__ : Optional[Any] = k.T
UpperCamelCase__ : Tuple = o.T
UpperCamelCase__ : Any = q.T
UpperCamelCase__ : Optional[Any] = v.T
# Block i, layer 1 (Cross Attention).
UpperCamelCase__ : Optional[Any] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , '''pre_cross_attention_layer_norm''' )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Any = tax_attention_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , '''encoder_decoder_attention''' )
UpperCamelCase__ : Optional[int] = layer_norm
UpperCamelCase__ : List[Any] = k.T
UpperCamelCase__ : Optional[Any] = o.T
UpperCamelCase__ : Dict = q.T
UpperCamelCase__ : Any = v.T
# Block i, layer 2 (MLP).
UpperCamelCase__ : Union[str, Any] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , '''pre_mlp_layer_norm''' )
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = tax_mlp_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''decoder''' , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = layer_norm
if split_mlp_wi:
UpperCamelCase__ : str = wi[0].T
UpperCamelCase__ : Any = wi[1].T
else:
UpperCamelCase__ : Tuple = wi.T
UpperCamelCase__ : Tuple = wo.T
UpperCamelCase__ : Optional[int] = old['''decoder/decoder_norm/scale''']
UpperCamelCase__ : Any = old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCamelCase__ : Dict = old['''decoder/logits_dense/kernel'''].T
return new
def make_state_dict ( converted_params , is_encoder_only : bool ):
"""simple docstring"""
UpperCamelCase__ : Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
UpperCamelCase__ : Optional[Any] = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
UpperCamelCase__ : List[Any] = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
UpperCamelCase__ : List[str] = state_dict['''shared.weight''']
return state_dict
def load_tax_weights_in_ta ( model , config , tax_checkpoint_path , is_encoder_only ):
"""simple docstring"""
UpperCamelCase__ : Tuple = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = convert_tax_to_pytorch(SCREAMING_SNAKE_CASE , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = make_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
def convert_tax_checkpoint_to_pytorch ( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only : bool = False ):
"""simple docstring"""
UpperCamelCase__ : Tuple = TaConfig.from_json_file(SCREAMING_SNAKE_CASE )
print(F"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
UpperCamelCase__ : Any = TaEncoderModel(SCREAMING_SNAKE_CASE )
else:
UpperCamelCase__ : Union[str, Any] = TaForConditionalGeneration(SCREAMING_SNAKE_CASE )
    # Load weights from the T5X checkpoint
load_tax_weights_in_ta(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
# Verify that we can load the checkpoint.
model.from_pretrained(SCREAMING_SNAKE_CASE )
print('''Done''' )
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
__UpperCamelCase : int = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
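# Illustrative check (an addition; shapes are arbitrary) of why the kernels
# above are transposed: T5X/Flax Dense kernels are stored as
# (in_features, out_features), while torch.nn.Linear.weight is
# (out_features, in_features).
def _demo_kernel_transpose():
    import numpy as np
    kernel = np.random.randn(8, 16).astype("float32")  # (in, out) as stored by T5X
    linear = torch.nn.Linear(8, 16, bias=False)
    linear.weight.data = torch.from_numpy(kernel.T)    # (out, in) as PyTorch expects
    x = torch.randn(2, 8)
    assert torch.allclose(linear(x), x @ torch.from_numpy(kernel), atol=1e-5)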
| 51 | 0 |
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray , size: int , stride: int ) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr: np.ndarray , size: int , stride: int ) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
    image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
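# Worked example (an addition): 2x2 / stride-2 max pooling of a 4x4 array.
def _demo_maxpool():
    arr = np.arange(1, 17).reshape(4, 4)
    print(maxpooling(arr, size=2, stride=2))
    # [[ 6.  8.]
    #  [14. 16.]]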
| 117 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
snake_case__ : int = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
snake_case__ : int = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
snake_case__ : str = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class A_ ( _lowerCamelCase ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = BertTokenizer
def __init__(self :List[str] , _UpperCamelCase :List[str]=None , _UpperCamelCase :Optional[Any]=None , _UpperCamelCase :str=True , _UpperCamelCase :Optional[Any]="[UNK]" , _UpperCamelCase :Tuple="[SEP]" , _UpperCamelCase :List[Any]="[PAD]" , _UpperCamelCase :int="[CLS]" , _UpperCamelCase :Optional[int]="[MASK]" , _UpperCamelCase :Union[str, Any]=True , _UpperCamelCase :str=None , **_UpperCamelCase :List[str] , )-> str:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
__A = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _UpperCamelCase ) != tokenize_chinese_chars
):
__A = getattr(_UpperCamelCase , normalizer_state.pop('''type''' ) )
__A = do_lower_case
__A = strip_accents
__A = tokenize_chinese_chars
__A = normalizer_class(**_UpperCamelCase )
__A = do_lower_case
def _lowerCAmelCase (self :Any , _UpperCamelCase :int , _UpperCamelCase :List[str]=None )-> List[Any]:
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCAmelCase (self :List[str] , _UpperCamelCase :List[int] , _UpperCamelCase :Optional[List[int]] = None )-> List[int]:
__A = [self.sep_token_id]
__A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCAmelCase (self :Any , _UpperCamelCase :str , _UpperCamelCase :Optional[str] = None )-> Tuple[str]:
__A = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
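# Illustrative layout check (an addition; "bert-base-uncased" is only an
# example id, and BertTokenizerFast is the un-mangled name of the class
# defined above): the two methods produce the standard BERT pair encoding
# [CLS] A [SEP] B [SEP] with segment ids 0...0 1...1.
def _demo_pair_layout():
    from transformers import BertTokenizerFast
    tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
    enc = tok("first", "second")
    assert enc["token_type_ids"] == [0, 0, 0, 1, 1]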
| 117 | 1 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["""data"""])
y = np.array(data["""target"""])
classes = data["""target_names"""]
X_train , X_test , y_train , y_test = train_test_split(X, y)
def euclidean_distance(a , b ) -> float:
    """simple docstring"""
    return np.linalg.norm(np.array(a ) - np.array(b ) )
def classifier(train_data , train_target , classes , point , k=5 ) -> str:
    """simple docstring"""
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
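    # Added illustration (not in the original script): accuracy of the k-NN rule
    # on the held-out split produced by train_test_split above.
    predictions = [classifier(X_train, y_train, classes, point) for point in X_test]
    accuracy = sum(pred == classes[true] for pred, true in zip(predictions, y_test)) / len(y_test)
    print(f"test accuracy: {accuracy:.2f}")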
| 231 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
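# Added illustration (not part of the original module): _LazyModule defers the heavy
# framework imports declared above until an attribute is first accessed, e.g.:
#
#   from transformers.models import speech_to_text as s2t
#   cfg_cls = s2t.Speech2TextConfig  # the submodule import happens here, not before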
| 231 | 1 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) for a Hermitian matrix `a`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
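    # Added illustration (not in the original): for Hermitian `a`, the Rayleigh
    # quotient of any nonzero `v` lies between the extreme eigenvalues (min-max theorem).
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    v = np.array([[1], [2], [3]])
    eigs = np.linalg.eigvalsh(a)
    r = rayleigh_quotient(a, v).item()
    assert eigs[0] <= r <= eigs[-1]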
| 219 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
__lowerCAmelCase : str = False
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Union[str, Any] = False
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
__lowerCAmelCase : List[str] = parser.parse_args()
__lowerCAmelCase : str = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
__lowerCAmelCase : Optional[int] = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
__lowerCAmelCase : str = "" if has_file(args.repo_path, "config.json") else "unet"
    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
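    # Added toy illustration of the config renaming pass above (hypothetical values):
    #
    #   sample = {"image_size": 64, "num_res_blocks": 2}
    #   for old, new in config_parameters_to_change.items():
    #       if old in sample:
    #           sample[new] = sample.pop(old)
    #   # sample == {"sample_size": 64, "layers_per_block": 2}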
| 156 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 365 |
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    octets = [int(i) for i in ip_va_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(f"{ip} is a {valid_or_invalid} IP v4 address.")
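    # Added cross-check sketch (not in the original): the standard library accepts the
    # full 0-255 octet range, unlike the 254 cap used above.
    #
    #   from ipaddress import AddressValueError, IPv4Address
    #   def is_valid_stdlib(addr: str) -> bool:
    #       try:
    #           IPv4Address(addr)
    #           return True
    #       except AddressValueError:
    #           return False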
| 167 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__a = image_processor.size['''shortest_edge''']
else:
__a = (image_processor.size['''height'''], image_processor.size['''width'''])
__a = Compose(
[
Lambda(lambda a__ : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(a__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(a__ ):
__a = [transforms(a__ ) for image in examples[image_column_name]]
return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds["train"] if training_args.do_train else None, eval_dataset=ds["validation"] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
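    # Added sketch of the collate_fn contract (not part of the original script):
    #
    #   dummy = [{"pixel_values": torch.zeros(3, 224, 224)} for _ in range(4)]
    #   collate_fn(dummy)["pixel_values"].shape  # torch.Size([4, 3, 224, 224])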
| 6 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each candidate label into
    an NLI premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=truncation, )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE, )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )

        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
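# Added numeric sketch of the multi_label branch above (hypothetical logits, with the
# entailment label at index 2 and contradiction at index 0):
#
#   logits = np.array([[[2.0, 0.0, 1.0]]])   # (num_sequences, n_labels, 3)
#   pair = logits[..., [0, 2]]               # [contradiction, entailment]
#   probs = np.exp(pair) / np.exp(pair).sum(-1, keepdims=True)
#   entail_scores = probs[..., 1]            # per-label entailment probability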
| 206 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5, position_embedding_type="absolute", block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 362 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def _lowerCamelCase ( self : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , config_name=a )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(a , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('gpt2' )
lowerCAmelCase__ : Any = GenerationConfig.from_model_config(a )
lowerCAmelCase__ : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : Dict = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
lowerCAmelCase__ : List[Any] = copy.deepcopy(a )
lowerCAmelCase__ : Dict = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {'foo': 'bar'} )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : List[Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(a )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained(a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar' )
lowerCAmelCase__ : int = GenerationConfig.from_model_config(a )
assert not hasattr(a , 'foo' ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase__ : List[Any] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(a )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='test-generation-config' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowerCAmelCase__ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='valid_org/test-generation-config-org' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
| 307 | 0 |
def solution(n: int = 1000) -> int:
    """Counts how many of the first `n` expansions of sqrt(2) have a numerator
    with more digits than the denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(F"{solution() = }")
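    # Added sanity check (not in the original): the eighth expansion, 1393/985,
    # is the first whose numerator has more digits than its denominator.
    assert solution(8) == 1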
| 328 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51 | 0 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = '''http://www.mocksite.com/file1.txt'''
CONTENT = '''"text": ["foo", "foo"]'''
HASH = '''6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'''
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
import requests
monkeypatch.setattr(__lowerCAmelCase , 'request' , __lowerCAmelCase )
a__ = URL
if issubclass(__lowerCAmelCase , __lowerCAmelCase ):
a__ = url
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
a__ = [url]
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
a__ = {'train': url}
a__ = 'dummy'
a__ = 'downloads'
a__ = tmp_path
a__ = DownloadConfig(
cache_dir=os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , use_etag=__lowerCAmelCase , )
a__ = DownloadManager(dataset_name=__lowerCAmelCase , download_config=__lowerCAmelCase )
a__ = dl_manager.download(__lowerCAmelCase )
a__ = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
a__ = [downloaded_paths]
a__ = [urls]
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
a__ = downloaded_paths.values()
a__ = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__lowerCAmelCase , __lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
a__ = Path(__lowerCAmelCase )
a__ = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
a__ = downloaded_path.read_text()
assert content == CONTENT
a__ = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
a__ = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Dict ):
a__ = str(__lowerCAmelCase )
if issubclass(__lowerCAmelCase , __lowerCAmelCase ):
a__ = filename
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
a__ = [filename]
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
a__ = {'train': filename}
a__ = 'dummy'
a__ = xz_file.parent
a__ = 'extracted'
a__ = DownloadConfig(
cache_dir=__lowerCAmelCase , use_etag=__lowerCAmelCase , )
a__ = DownloadManager(dataset_name=__lowerCAmelCase , download_config=__lowerCAmelCase )
a__ = dl_manager.extract(__lowerCAmelCase )
a__ = paths
for extracted_paths in [extracted_paths]:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
a__ = [extracted_paths]
a__ = [paths]
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
assert "train" in extracted_paths.keys()
a__ = extracted_paths.values()
a__ = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__lowerCAmelCase , __lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
a__ = Path(__lowerCAmelCase )
a__ = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__lowerCAmelCase , etag=__lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
a__ = extracted_path.read_text()
a__ = text_file.read_text()
assert extracted_file_content == expected_file_content
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
assert path.endswith('.jsonl' )
for num_items, line in enumerate(__lowerCAmelCase , start=1 ):
a__ = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : List[str] ):
a__ = request.getfixturevalue(__lowerCAmelCase )
a__ = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__lowerCAmelCase ) , start=1 ):
_test_jsonl(__lowerCAmelCase , __lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
a__ = request.getfixturevalue(__lowerCAmelCase )
a__ = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__lowerCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__lowerCAmelCase ) , start=1 ):
_test_jsonl(__lowerCAmelCase , __lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def __lowercase ( __lowerCAmelCase : Any ):
a__ = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(__lowerCAmelCase ) , start=1 ):
assert os.path.basename(__lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 362 |
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
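    # Added usage example (not in the original): 2, 3, 7, 8, 10, 13 is a longest
    # increasing subsequence of the list below, so the reported length is 6.
    assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6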
| 109 | 0 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer, skip_prompt=False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text, stream_end=False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(self, tokenizer, skip_prompt=False, timeout=None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text, stream_end=False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
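# Added usage sketch (hedged; `model`, `tokenizer`, and `inputs` are assumed to exist
# and are not part of the original module). The iterator variant is consumed from
# another thread while generate() streams tokens into it:
#
#   from threading import Thread
#
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#   Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer)).start()
#   for new_text in streamer:
#       print(new_text, end="")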
| 231 |
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down memoized edit (Levenshtein) distance between two words."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word index overflows - delete all remaining of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word index overflows - delete all remaining of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2), 1 + min_distance(index1, index2 + 1), diff + min_distance(index1 + 1, index2 + 1), )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
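    # Added usage example (not in the original): the textbook pair needs 5 edits.
    assert min_distance_up_bottom("intention", "execution") == 5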
| 231 | 1 |
import math
def prime_sieve(n):
    """Odd-only sieve of Eratosthenes returning all primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit=999_966_663_333):
    """For each pair of consecutive primes (p, q) with p**2 <= limit, sums the
    numbers between p**2 and q**2 (and below `limit`) divisible by exactly one
    of p and q."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
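    # Added sanity check for the sieve (not in the original).
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]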
| 364 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a__ : Tuple = {
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
a__ : Optional[Any] = {'''mobilebert-uncased''': 512}
a__ : List[Any] = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" MobileBERT tokenizer (backed by HuggingFace's tokenizers library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 19 | 0 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class UpperCAmelCase__ ( __UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 42
class UpperCAmelCase__ ( __UpperCAmelCase ,__UpperCAmelCase ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , a_ : int = 16 , a_ : int = 88 , a_ : Optional[int] = None , a_ : Optional[int] = None , a_ : int = 1 , a_ : float = 0.0 , a_ : int = 32 , a_ : Optional[int] = None , a_ : bool = False , a_ : Optional[int] = None , a_ : str = "geglu" , a_ : bool = True , a_ : bool = True , ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Union[str, Any] = attention_head_dim
__UpperCAmelCase : Optional[int] = num_attention_heads * attention_head_dim
__UpperCAmelCase : Any = in_channels
__UpperCAmelCase : Tuple = torch.nn.GroupNorm(num_groups=_lowerCamelCase , num_channels=_lowerCamelCase , eps=1e-6 , affine=_lowerCamelCase )
__UpperCAmelCase : Optional[int] = nn.Linear(_lowerCamelCase , _lowerCamelCase )
# 3. Define transformers blocks
__UpperCAmelCase : Any = nn.ModuleList(
[
BasicTransformerBlock(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , dropout=_lowerCamelCase , cross_attention_dim=_lowerCamelCase , activation_fn=_lowerCamelCase , attention_bias=_lowerCamelCase , double_self_attention=_lowerCamelCase , norm_elementwise_affine=_lowerCamelCase , )
for d in range(_lowerCamelCase )
] )
__UpperCAmelCase : Optional[int] = nn.Linear(_lowerCamelCase , _lowerCamelCase )
    def forward( self , hidden_states , encoder_hidden_states=None , timestep=None , class_labels=None , num_frames=1 , cross_attention_kwargs=None , return_dict : bool = True , ):
        '''simple docstring'''
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size , num_frames , channel , height , width )
        hidden_states = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        hidden_states = self.norm(hidden_states )
        hidden_states = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , num_frames , channel )
        hidden_states = self.proj_in(hidden_states )
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states , encoder_hidden_states=encoder_hidden_states , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , class_labels=class_labels , )
        # 3. Output
        hidden_states = self.proj_out(hidden_states )
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size , height , width , num_frames , channel )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames , channel , height , width )
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output )
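# A minimal, self-contained sketch of the reshape round-trip performed by the
# forward pass above: frames are folded out of the batch axis, attention runs
# over the frame axis per spatial location, and the result is folded back.
# The shapes below are illustrative assumptions, not values from the model.
def _demo_temporal_reshape() -> None:
    batch_size, num_frames, channel, height, width = 2, 4, 8, 16, 16
    hidden_states = torch.randn(batch_size * num_frames , channel , height , width )
    x = hidden_states[None, :].reshape(batch_size , num_frames , channel , height , width )
    x = x.permute(0 , 2 , 1 , 3 , 4 )  # (batch, channel, frames, h, w)
    x = x.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , num_frames , channel )
    # ... the transformer blocks attend over the num_frames axis here ...
    x = x[None, None, :].reshape(batch_size , height , width , num_frames , channel )
    x = x.permute(0 , 3 , 4 , 1 , 2 ).contiguous().reshape(batch_size * num_frames , channel , height , width )
    assert torch.equal(x , hidden_states )  # the round-trip is lossless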
| 226 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_lowerCamelCase : int = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class lowercase ( PretrainedConfig):
    model_type = """transfo-xl"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""",
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=26_77_35 , cutoffs=[2_00_00, 4_00_00, 20_00_00] , d_model=10_24 , d_embed=10_24 , n_head=16 , d_head=64 , d_inner=40_96 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=16_00 , clamp_len=10_00 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1E-5 , eos_token_id=0 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self ):
        """simple docstring"""
        logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value ):
        """simple docstring"""
        raise NotImplementedError(
            F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 167 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roc_bert'''] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
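# A minimal sketch of the lazy-import pattern used by the module above:
# submodules are imported only when one of their symbols is first accessed.
# This is a simplified stand-in, not the real transformers _LazyModule.
import importlib
import types
class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }
    def __getattr__(self, attr):
        submodule_name = self._symbol_to_module.get(attr)
        if submodule_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{submodule_name}")
        return getattr(submodule, attr)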
| 354 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock (tmpdir ):
    """simple docstring"""
    locka = FileLock(str(tmpdir / '''foo.lock''' ) )
    lockb = FileLock(str(tmpdir / '''foo.lock''' ) )
    timeout = 0.0_1
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_filename (tmpdir ):
    """simple docstring"""
    filename = '''a''' * 1_0_0_0 + '''.lock'''
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith('''.lock''' )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
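# A quick usage sketch of the FileLock API exercised by the tests above:
# acquiring with a timeout raises Timeout when another process holds the
# lock, while re-acquiring inside the same process is reentrant. The helper
# name is ours, not part of the test module.
def demo_filelock(tmp_path ):
    lock = FileLock(str(tmp_path / '''demo.lock''' ) )
    try:
        with lock.acquire(timeout=0.01 ):
            pass  # critical section: at most one holder of demo.lock
    except Timeout:
        print('''another process holds demo.lock''' )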
| 85 | 0 |
'''simple docstring'''
def snake_case_ ( number : int ):
    """simple docstring"""
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
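# A quick demonstration of the automorphic-number check above: n is
# automorphic when n * n ends in the digits of n (e.g. 76 ** 2 == 5776).
print([n for n in range(100 ) if snake_case_(n )] )  # [0, 1, 5, 6, 25, 76]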
| 93 |
def solution ( _A = 1000 ) -> int:
"""simple docstring"""
return sum(e for e in range(3 , _A ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(f'''{solution() = }''')
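# The brute-force sum above also has a closed form by inclusion-exclusion over
# arithmetic series (multiples of 3, plus multiples of 5, minus multiples of
# 15). A sketch for cross-checking; the function name is ours:
def solution_closed_form(limit: int = 1000 ) -> int:
    def series(k: int ) -> int:
        m = (limit - 1) // k  # number of positive multiples of k below limit
        return k * m * (m + 1) // 2  # k * (1 + 2 + ... + m)
    return series(3 ) + series(5 ) - series(15 )
assert solution_closed_form(1000 ) == 233168 == solution(1000 )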
| 307 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum ):
    '''simple docstring'''
    CHARACTER = 'char'
    BPE = 'bpe'
    WORDPIECE = 'wp'
lowercase : int = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class UpperCAmelCase_ ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['image_processor', 'char_tokenizer']
    image_processor_class = 'ViTImageProcessor'
    char_tokenizer_class = 'MgpstrTokenizer'
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2" )
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process." )
        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self , sequences ):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0 )
        char_strs, char_scores = self._decode_helper(char_preds , "char" )
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds , "bpe" )
        wp_strs, wp_scores = self._decode_helper(wp_preds , "wp" )
        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper( self , pred_logits , format ):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f'''Format {format} is not supported.''' )
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        _, preds_index = pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
        preds_index = preds_index.view(-1 , batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )
        return dec_strs, conf_scores
    def char_decode( self , sequences ):
        decode_strs = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs
    def bpe_decode( self , sequences ):
        return self.bpe_tokenizer.batch_decode(sequences )
    def wp_decode( self , sequences ):
        decode_strs = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs
| 36 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = 'google/mobilebert-uncased'
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts( self , tokenizer ):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True )
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True )
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_chinese( self ):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
    def test_basic_tokenizer_lower( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def test_basic_tokenizer_lower_strip_accents_false( self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
    def test_basic_tokenizer_lower_strip_accents_true( self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def test_basic_tokenizer_lower_strip_accents_default( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def test_basic_tokenizer_no_lower( self ):
        tokenizer = BasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
    def test_basic_tokenizer_respects_never_split_tokens( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=["[UNK]"] )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
    def test_wordpiece_tokenizer( self ):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
        self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
    def test_is_whitespace( self ):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control( self ):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation( self ):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text( self ):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
    @slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , "do_lower_case" ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
    def test_change_tokenize_chinese_chars( self ):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
| 36 | 1 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class UpperCamelCase_ ( YolosImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs) ->None:
        '''simple docstring'''
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs)
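# The class above follows the standard deprecation-shim pattern: an empty
# subclass that emits a FutureWarning and defers everything to its
# replacement. A generic sketch of the pattern, with hypothetical names:
class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size
class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead." , FutureWarning , )
        super().__init__(*args, **kwargs)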
| 14 |
"""simple docstring"""
from __future__ import annotations
class SCREAMING_SNAKE_CASE__ :
def __init__( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
UpperCAmelCase : Any = data
UpperCAmelCase : Node | None = None
UpperCAmelCase : Node | None = None
def display( tree: Node | None ):  # In Order traversal of the tree
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )
def depth_of_tree( tree: Node | None ):
    return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree( tree: Node ):
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right
def main( ):  # Main function for testing.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print("""Tree is: """ )
    display(tree )
if __name__ == "__main__":
main()
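# An iterative alternative to the recursive depth_of_tree above; a sketch
# useful when trees are deep enough to hit Python's recursion limit. The
# function name is ours, not part of the original module.
def depth_of_tree_iterative( tree: Node | None ) -> int:
    depth = 0
    stack = [(tree, 1)] if tree else []
    while stack:
        node, level = stack.pop()
        depth = max(depth , level )
        for child in (node.left, node.right):
            if child:
                stack.append((child, level + 1) )
    return depth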
| 109 | 0 |
import re
from filelock import FileLock
try:
import nltk
lowerCamelCase__ = True
except (ImportError, ModuleNotFoundError):
lowerCamelCase__ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def A(__a: str ):
    __a = re.sub("<n>" , "" , __a )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__a ) )
| 22 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir , src_lang , tgt_lang ):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }
    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = F"{src_lang}-{tgt_lang}"
lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , "README.md" )
    print(F"Generating {path}" )
    with open(path , "w" , encoding="utf-8" ) as f:
        f.write(lowerCAmelCase_ )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 22 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        """num_images_per_prompt""",
        """latents""",
        """callback""",
        """callback_steps""",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        scheduler = DDIMScheduler()
        components = {'unet': unet, 'scheduler': scheduler}
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
    def test_dict_tuple_outputs_equivalent( self ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def test_save_load_local( self ):
        super().test_save_load_local(expected_max_difference=3e-3 )
    def test_save_load_optional_components( self ):
        super().test_save_load_optional_components(expected_max_difference=3e-3 )
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a_integration ( unittest.TestCase ):
    def test_inference_cifar10( self ):
        model_id = 'google/ddpm-cifar10-32'
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddim.to(torch_device )
        ddim.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddim(generator=generator , eta=0.0 , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_ema_bedroom( self ):
        model_id = 'google/ddpm-ema-bedroom-256'
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = DDIMScheduler.from_pretrained(model_id )
        ddpm = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddpm.to(torch_device )
        ddpm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddpm(generator=generator , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        expected_slice = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 168 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bloom_fast'''] = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bloom'''] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _create_iam_role_for_sagemaker( role_name ):
    """simple docstring"""
    iam_client = botoa.client("iam" )
    sagemaker_trust_policy = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
],
}
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy , indent=2 ) )
        policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name , PolicyName=F"""{role_name}_policy_permission""" , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(F"""role {role_name} already exists. Using existing one""" )
def _get_iam_role_arn( role_name ):
    """simple docstring"""
    iam_client = botoa.client("iam" )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : str = _ask_options(
"How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , lowerCamelCase__ , )
lowercase__ : Any = None
if credentials_configuration == 0:
lowercase__ : int = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
lowercase__ : Tuple = aws_profile
else:
print(
"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
lowercase__ : Any = _ask_field("AWS Access Key ID: " )
lowercase__ : List[Any] = aws_access_key_id
lowercase__ : int = _ask_field("AWS Secret Access Key: " )
lowercase__ : Optional[Any] = aws_secret_access_key
lowercase__ : Any = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
lowercase__ : Union[str, Any] = aws_region
lowercase__ : Tuple = _ask_options(
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , lowerCamelCase__ , )
if role_management == 0:
lowercase__ : Tuple = _ask_field("Enter your IAM role name: " )
else:
lowercase__ : Optional[int] = "accelerate_sagemaker_execution_role"
print(F"""Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials""" )
_create_iam_role_for_sagemaker(lowerCamelCase__ )
lowercase__ : int = _ask_field(
"Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase__ , error_message="Please enter yes or no." , )
lowercase__ : Any = None
if is_custom_docker_image:
lowercase__ : Tuple = _ask_field("Enter your Docker image: " , lambda lowerCamelCase__ : str(lowerCamelCase__ ).lower() )
lowercase__ : Tuple = _ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase__ , error_message="Please enter yes or no." , )
lowercase__ : str = None
if is_sagemaker_inputs_enabled:
lowercase__ : str = _ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda lowerCamelCase__ : str(lowerCamelCase__ ).lower() , )
lowercase__ : Tuple = _ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase__ , error_message="Please enter yes or no." , )
lowercase__ : List[str] = None
if is_sagemaker_metrics_enabled:
lowercase__ : Dict = _ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda lowerCamelCase__ : str(lowerCamelCase__ ).lower() , )
lowercase__ : int = _ask_options(
"What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
lowercase__ : Union[str, Any] = {}
lowercase__ : Union[str, Any] = _ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=lowerCamelCase__ , error_message="Please enter yes or no." , )
if use_dynamo:
lowercase__ : int = "dynamo_"
lowercase__ : Optional[int] = _ask_options(
"Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
lowercase__ : Optional[Any] = _ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase__ , error_message="Please enter yes or no." , )
if use_custom_options:
lowercase__ : int = _ask_options(
"Which mode do you want to use?" , lowerCamelCase__ , lambda lowerCamelCase__ : TORCH_DYNAMO_MODES[int(lowerCamelCase__ )] , default="default" , )
lowercase__ : Any = _ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase__ , error_message="Please enter yes or no." , )
lowercase__ : Optional[int] = _ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase__ , error_message="Please enter yes or no." , )
lowercase__ : int = "Which EC2 instance type you want to use for your training?"
if distributed_type != SageMakerDistributedType.NO:
lowercase__ : str = _ask_options(
lowerCamelCase__ , lowerCamelCase__ , lambda lowerCamelCase__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(lowerCamelCase__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
lowercase__ : Union[str, Any] = _ask_field(lowerCamelCase__ , lambda lowerCamelCase__ : str(lowerCamelCase__ ).lower() , default="ml.p3.2xlarge" )
lowercase__ : Dict = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
lowercase__ : Optional[int] = _ask_field(
"How many machines do you want use? [1]: " , lowerCamelCase__ , default=1 , )
lowercase__ : Union[str, Any] = _ask_options(
"Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
return SageMakerConfig(
image_uri=lowerCamelCase__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=lowerCamelCase__ , use_cpu=lowerCamelCase__ , dynamo_config=lowerCamelCase__ , eca_instance_type=lowerCamelCase__ , profile=lowerCamelCase__ , region=lowerCamelCase__ , iam_role_name=lowerCamelCase__ , mixed_precision=lowerCamelCase__ , num_machines=lowerCamelCase__ , sagemaker_inputs_file=lowerCamelCase__ , sagemaker_metrics_file=lowerCamelCase__ , )
| 367 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ = "cpu" , lowerCamelCase__ = None ):
"""simple docstring"""
lowercase__ : Any = torch.load(lowerCamelCase__ , map_location=lowerCamelCase__ )
for k, v in tqdm(state_dict.items() ):
if not isinstance(lowerCamelCase__ , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
lowercase__ : int = v.half()
if save_path is None: # overwrite src_path
lowercase__ : Optional[Any] = src_path
torch.save(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
fire.Fire(convert)
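# A small sanity check for the conversion above, using throwaway files in the
# current directory (the file names are illustrative, not part of the script):
def _demo_convert() -> None:
    torch.save({"w": torch.randn(4 , 4 )} , "tiny.bin" )
    convert("tiny.bin" , save_path="tiny.fp16.bin" )
    assert torch.load("tiny.fp16.bin" )["w"].dtype == torch.float16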
| 121 | 0 |
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __snake_case ( unittest.TestCase ):
    def test_set_level( self):
        '''simple docstring'''
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity())
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity())
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity())
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity())
        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration( self):
        '''simple docstring'''
        level_origin = logging.get_verbosity()
        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out , msg + '\n')
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out , '')
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out , msg + '\n')
        # restore to the original level
        logging.set_verbosity(level_origin)
@mockenv(TRANSFORMERS_VERBOSITY='error')
    def test_env_override( self):
        '''simple docstring'''
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger('transformers.models.bart.tokenization_bart')
        env_level_str = os.getenv('TRANSFORMERS_VERBOSITY' , None)
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level , current_level , f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , )
        # restore to the original level
        os.environ['TRANSFORMERS_VERBOSITY'] = ''
        transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error')
    def test_env_invalid_override( self):
        '''simple docstring'''
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart')
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out)
# no need to restore as nothing was changed
    def test_advisory_warnings( self):
        '''simple docstring'''
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1'):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out , '')
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=''):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out , msg + '\n')
def __a ( ) ->Optional[Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
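# A compact usage sketch of the verbosity API exercised by the tests above;
# the helper name is ours:
def _demo_verbosity() -> None:
    logging.set_verbosity_info()
    logger = logging.get_logger(__name__ )
    logger.info("visible at info level" )
    logging.set_verbosity_error()  # from here on, only errors are emitted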
| 290 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCamelCase_( snake_case : Tuple ):
'''simple docstring'''
snake_case_ = FileLock(str(tmpdir / "foo.lock" ) )
snake_case_ = FileLock(str(tmpdir / "foo.lock" ) )
snake_case_ = 0.01
with locka.acquire():
with pytest.raises(snake_case ):
snake_case_ = time.time()
locka.acquire(snake_case )
assert time.time() - _start > timeout
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
snake_case_ = "a" * 1_0_0_0 + ".lock"
snake_case_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(snake_case )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
snake_case_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(snake_case ):
locka.acquire(0 )
| 85 | 0 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    @slow
    def test_tokenizer_from_pretrained( self ):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            self.assertIsNotNone(tokenizer )
            self.assertIsInstance(tokenizer ,(BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(tokenizer ) ,0 )
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            self.assertIsNotNone(tokenizer )
            self.assertIsInstance(tokenizer ,(GPTaTokenizer, GPTaTokenizerFast) )
            self.assertGreater(len(tokenizer ) ,0 )
    def test_tokenizer_from_pretrained_identifier( self ):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(tokenizer ,(BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size ,12 )
    def test_tokenizer_from_model_type( self ):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(tokenizer ,(RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size ,20 )
    def test_tokenizer_from_tokenizer_class( self ):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER )
        self.assertIsInstance(config ,RobertaConfig )
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER ,config=config )
        self.assertIsInstance(tokenizer ,(BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size ,12 )
def UpperCamelCase_ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" ,os.path.join(A ,"vocab.txt" ) )
__A = AutoTokenizer.from_pretrained(A ,tokenizer_type="bert" ,use_fast=A )
self.assertIsInstance(A ,A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" ,os.path.join(A ,"vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" ,os.path.join(A ,"merges.txt" ) )
__A = AutoTokenizer.from_pretrained(A ,tokenizer_type="gpt2" ,use_fast=A )
self.assertIsInstance(A ,A )
@require_tokenizers
def UpperCamelCase_ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" ,os.path.join(A ,"vocab.txt" ) )
__A = AutoTokenizer.from_pretrained(A ,tokenizer_type="bert" )
self.assertIsInstance(A ,A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" ,os.path.join(A ,"vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" ,os.path.join(A ,"merges.txt" ) )
__A = AutoTokenizer.from_pretrained(A ,tokenizer_type="gpt2" )
self.assertIsInstance(A ,A )
def UpperCamelCase_ ( self : Union[str, Any] ):
with pytest.raises(A ):
AutoTokenizer.from_pretrained("./" ,tokenizer_type="xxx" )
@require_tokenizers
def UpperCamelCase_ ( self : Dict ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__A = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
self.assertIsInstance(A ,(BertTokenizer, BertTokenizerFast) )
if isinstance(A ,A ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,A )
else:
self.assertEqual(tokenizer.do_lower_case ,A )
self.assertEqual(tokenizer.model_max_length ,5_12 )
@require_tokenizers
def UpperCamelCase_ ( self : Any ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
A ,"julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" ,):
__A = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
def UpperCamelCase_ ( self : Optional[int] ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
__A = TOKENIZER_MAPPING.values()
__A = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(A )
@require_tokenizers
def UpperCamelCase_ ( self : Optional[int] ):
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ,use_fast=A ) ,A )
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) ,A )
@require_tokenizers
def UpperCamelCase_ ( self : Tuple ):
__A = AutoTokenizer.from_pretrained("distilbert-base-uncased" ,do_lower_case=A )
__A = "Hello, world. How are you?"
__A = tokenizer.tokenize(A )
self.assertEqual("[UNK]" ,tokens[0] )
__A = AutoTokenizer.from_pretrained("microsoft/mpnet-base" ,do_lower_case=A )
__A = tokenizer.tokenize(A )
self.assertEqual("[UNK]" ,tokens[0] )
@require_tokenizers
def UpperCamelCase_ ( self : Optional[int] ):
__A = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
self.assertEqual(type(A ) ,A )
self.assertEqual(tokenizer.model_max_length ,5_12 )
self.assertEqual(tokenizer.vocab_size ,3_00_00 )
self.assertEqual(tokenizer.unk_token ,"[UNK]" )
self.assertEqual(tokenizer.padding_side ,"right" )
self.assertEqual(tokenizer.truncation_side ,"right" )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = AutoTokenizer.from_pretrained(A )
self.assertIsInstance(A ,(BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A )
__A = AutoTokenizer.from_pretrained(A )
self.assertIsInstance(A ,tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size ,12 )
def UpperCamelCase_ ( self : Dict ):
__A = AutoTokenizer.from_pretrained("ctrl" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(A ,A )
def UpperCamelCase_ ( self : List[str] ):
# Check we can load the tokenizer config of an online model.
__A = get_tokenizer_config("bert-base-cased" )
__A = config.pop("_commit_hash" ,A )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(A ,{"do_lower_case": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
__A = get_tokenizer_config(A )
self.assertDictEqual(A ,{} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__A = AutoTokenizer.from_pretrained(A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A )
__A = get_tokenizer_config(A )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] ,"BertTokenizer" )
def UpperCamelCase_ ( self : Tuple ):
try:
AutoConfig.register("custom" ,A )
AutoTokenizer.register(A ,slow_tokenizer_class=A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A ):
AutoTokenizer.register(A ,slow_tokenizer_class=A )
__A = CustomTokenizer.from_pretrained(A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A )
__A = AutoTokenizer.from_pretrained(A )
self.assertIsInstance(A ,A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def UpperCamelCase_ ( self : Optional[int] ):
try:
AutoConfig.register("custom" ,A )
# Can register in two steps
AutoTokenizer.register(A ,slow_tokenizer_class=A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) )
AutoTokenizer.register(A ,fast_tokenizer_class=A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
A ,slow_tokenizer_class=A ,fast_tokenizer_class=A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A ):
AutoTokenizer.register(A ,fast_tokenizer_class=A )
            # We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new tokenizer
            # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__A = BertTokenizerFast.from_pretrained(A )
bert_tokenizer.save_pretrained(A )
__A = CustomTokenizerFast.from_pretrained(A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A )
__A = AutoTokenizer.from_pretrained(A )
self.assertIsInstance(A ,A )
__A = AutoTokenizer.from_pretrained(A ,use_fast=A )
self.assertIsInstance(A ,A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCamelCase_ ( self : Any ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(A ):
__A = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A ):
__A = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" ,trust_remote_code=A )
__A = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" ,trust_remote_code=A )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A )
__A = AutoTokenizer.from_pretrained(A ,trust_remote_code=A )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,"NewTokenizerFast" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,"NewTokenizerFast" )
# Test we can also load the slow version
__A = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" ,trust_remote_code=A ,use_fast=A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,"NewTokenizer" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A )
__A = AutoTokenizer.from_pretrained(A ,trust_remote_code=A ,use_fast=A )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,"NewTokenizer" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ ,"NewTokenizer" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,"NewTokenizer" )
@require_tokenizers
def UpperCamelCase_ ( self : Tuple ):
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = False
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = NewTokenizer
snake_case_ = False
try:
AutoConfig.register("custom" ,A )
AutoTokenizer.register(A ,slow_tokenizer_class=A )
AutoTokenizer.register(A ,fast_tokenizer_class=A )
# If remote code is not set, the default is to use local
__A = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
self.assertEqual(tokenizer.__class__.__name__ ,"NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
__A = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" ,use_fast=A )
self.assertEqual(tokenizer.__class__.__name__ ,"NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
__A = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" ,trust_remote_code=A )
self.assertEqual(tokenizer.__class__.__name__ ,"NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
__A = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" ,trust_remote_code=A ,use_fast=A )
self.assertEqual(tokenizer.__class__.__name__ ,"NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
__A = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" ,trust_remote_code=A )
self.assertEqual(tokenizer.__class__.__name__ ,"NewTokenizerFast" )
self.assertTrue(tokenizer.special_attribute_present )
__A = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" ,trust_remote_code=A ,use_fast=A )
self.assertEqual(tokenizer.__class__.__name__ ,"NewTokenizer" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCamelCase_ ( self : List[Any] ):
__A = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" ,trust_remote_code=A )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,"NewTokenizerFast" )
# Test we can also load the slow version
__A = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" ,trust_remote_code=A ,use_fast=A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,"NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ ,"NewTokenizer" )
def UpperCamelCase_ ( self : Optional[int] ):
with self.assertRaisesRegex(
A ,"bert-base is not a local folder and is not a valid model identifier" ):
__A = AutoTokenizer.from_pretrained("bert-base" )
def UpperCamelCase_ ( self : Dict ):
with self.assertRaisesRegex(
A ,R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__A = AutoTokenizer.from_pretrained(A ,revision="aaaaaa" )
def UpperCamelCase_ ( self : Dict ):
# Make sure we have cached the tokenizer.
__A = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
__A = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
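
# Hedged sketch (illustrative class names, separate from the tests above) of the
# AutoTokenizer registration pattern those tests exercise: pair a custom config
# type with a custom tokenizer class so AutoTokenizer can resolve it.
from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyTokenizer(PreTrainedTokenizer):
    pass  # a real tokenizer would implement vocab handling


AutoConfig.register("my-model", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)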
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
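
# Hedged sketch of the id mapping the BPE test above asserts: with the toy
# vocab, a plain dict lookup reproduces convert_tokens_to_ids.
_toy_vocab = {"adapt": 0, "re@@": 1, "a@@": 2, "apt": 3, "c@@": 4, "t": 5, "<unk>": 6}
_toy_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split() + ["<unk>"]
assert [_toy_vocab.get(t, _toy_vocab["<unk>"]) for t in _toy_tokens] == [0, 1, 2, 4, 5, 1, 0, 3, 6]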
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if side lengths `nums` can close into a polygon."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
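
# Hedged usage sketch for `check_polygon` above: the longest side must be
# strictly shorter than the sum of the remaining sides.
assert check_polygon([3, 4, 5]) is True  # a valid triangle
assert check_polygon([3, 4, 10]) is False  # 10 >= 3 + 4, the sides cannot close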
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        # Exercise evaluation and prediction again with accumulation enabled, then restore it.
        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
def selection_sort(collection):
    """Sort `collection` in place with selection sort and return it."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
lowercase_ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase_ = [int(item) for item in user_input.split(""",""")]
print(selection_sort(unsorted))
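
# Quick sanity check for `selection_sort` above: O(n^2) comparisons, sorts the
# list in place and also returns it.
assert selection_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]
assert selection_sort([]) == []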
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class _snake_case :
UpperCamelCase__ : Optional[str] =field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""})
UpperCamelCase__ : Optional[str] =field(
default=lowercase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""})
UpperCamelCase__ : Optional[str] =field(
default=lowercase__ , metadata={"""help""": """The column name of the images in the files."""})
UpperCamelCase__ : Optional[str] =field(default=lowercase__ , metadata={"""help""": """A folder containing the training data."""})
UpperCamelCase__ : Optional[str] =field(default=lowercase__ , metadata={"""help""": """A folder containing the validation data."""})
UpperCamelCase__ : Optional[float] =field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""})
UpperCamelCase__ : Optional[int] =field(
default=lowercase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCamelCase__ : Optional[int] =field(
default=lowercase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def A__ ( self : Tuple ):
lowercase__ = {}
if self.train_dir is not None:
lowercase__ = self.train_dir
if self.validation_dir is not None:
lowercase__ = self.validation_dir
lowercase__ = data_files if data_files else None
@dataclass
class _snake_case :
UpperCamelCase__ : str =field(
default=lowercase__ , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
UpperCamelCase__ : Optional[str] =field(
default=lowercase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""})
UpperCamelCase__ : Optional[str] =field(
default=lowercase__ , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
UpperCamelCase__ : Optional[str] =field(
default=lowercase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""})
UpperCamelCase__ : str =field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase__ : str =field(default=lowercase__ , metadata={"""help""": """Name or path of preprocessor config."""})
UpperCamelCase__ : bool =field(
default=lowercase__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
UpperCamelCase__ : float =field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""})
UpperCamelCase__ : bool =field(
default=lowercase__ , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""})
@dataclass
class _snake_case ( lowercase__):
UpperCamelCase__ : float =field(
default=1E-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""})
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = torch.stack([example["pixel_values"] for example in examples] )
return {"pixel_values": pixel_values}
def __lowerCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mae" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__ = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f''' distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowercase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
lowercase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase__ = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE_ ) and data_args.train_val_split > 0.0:
lowercase__ = ds["train"].train_test_split(data_args.train_val_split )
lowercase__ = split["train"]
lowercase__ = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowercase__ = ViTMAEConfig.from_pretrained(model_args.config_name , **SCREAMING_SNAKE_CASE_ )
elif model_args.model_name_or_path:
lowercase__ = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE_ )
else:
lowercase__ = ViTMAEConfig()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
# adapt config
config.update(
{
"mask_ratio": model_args.mask_ratio,
"norm_pix_loss": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
lowercase__ = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE_ )
elif model_args.model_name_or_path:
lowercase__ = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE_ )
else:
lowercase__ = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
lowercase__ = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
lowercase__ = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE_ )
if training_args.do_train:
lowercase__ = ds["train"].column_names
else:
lowercase__ = ds["validation"].column_names
if data_args.image_column_name is not None:
lowercase__ = data_args.image_column_name
elif "image" in column_names:
lowercase__ = "image"
elif "img" in column_names:
lowercase__ = "img"
else:
lowercase__ = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowercase__ = image_processor.size["shortest_edge"]
else:
lowercase__ = (image_processor.size["height"], image_processor.size["width"])
lowercase__ = Compose(
[
Lambda(lambda SCREAMING_SNAKE_CASE_ : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(SCREAMING_SNAKE_CASE_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(SCREAMING_SNAKE_CASE_ ):
lowercase__ = [transforms(SCREAMING_SNAKE_CASE_ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
lowercase__ = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(SCREAMING_SNAKE_CASE_ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
lowercase__ = (
ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(SCREAMING_SNAKE_CASE_ )
# Compute absolute learning rate
lowercase__ = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
lowercase__ = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
lowercase__ = Trainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , )
# Training
if training_args.do_train:
lowercase__ = None
if training_args.resume_from_checkpoint is not None:
lowercase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ = last_checkpoint
lowercase__ = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase__ = trainer.evaluate()
trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE_ )
# Write model card and (optionally) push to hub
lowercase__ = {
"tasks": "masked-auto-encoding",
"dataset": data_args.dataset_name,
"tags": ["masked-auto-encoding"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE_ )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
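
# Hedged numeric sketch of the linear LR scaling rule applied in main() above
# (absolute learning rate = base_lr * total_train_batch_size / 256); the
# batch-size factors are illustrative.
_base_lr = 1e-3
_total_train_batch_size = 8 * 4 * 2  # per-device batch * grad accumulation steps * world size
assert abs(_base_lr * _total_train_batch_size / 256 - 2.5e-4) < 1e-12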
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self : List[str] , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : float = None , snake_case_ : PILImageResampling = PILImageResampling.BILINEAR , snake_case_ : bool = True , snake_case_ : Union[int, float] = 1 / 2_5_5 , snake_case_ : bool = True , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , **snake_case_ : Tuple , ):
super().__init__(**snake_case_ )
_UpperCAmelCase = size if size is not None else {"shortest_edge": 3_8_4}
_UpperCAmelCase = get_size_dict(snake_case_ , default_to_square=snake_case_ )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
# Default value set here for backwards compatibility where the value in config is None
_UpperCAmelCase = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
_UpperCAmelCase = resample
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase ( self : Any , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : float , snake_case_ : PILImageResampling = PILImageResampling.BICUBIC , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Optional[int] , ):
_UpperCAmelCase = get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "shortest_edge" not in size:
raise ValueError(f'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
_UpperCAmelCase = size["shortest_edge"]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
_UpperCAmelCase = int(shortest_edge / crop_pct )
_UpperCAmelCase = get_resize_output_image_size(snake_case_ , size=snake_case_ , default_to_square=snake_case_ )
_UpperCAmelCase = resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=snake_case_ , size=(shortest_edge, shortest_edge) , data_format=snake_case_ , **snake_case_ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
snake_case_ , size=(shortest_edge, shortest_edge) , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowercase ( self : int , snake_case_ : np.ndarray , snake_case_ : Union[int, float] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Tuple , ):
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowercase ( self : Optional[int] , snake_case_ : np.ndarray , snake_case_ : Union[float, List[float]] , snake_case_ : Union[float, List[float]] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Union[str, Any] , ):
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowercase ( self : Any , snake_case_ : ImageInput , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : float = None , snake_case_ : PILImageResampling = None , snake_case_ : bool = None , snake_case_ : float = None , snake_case_ : bool = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : ChannelDimension = ChannelDimension.FIRST , **snake_case_ : str , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = crop_pct if crop_pct is not None else self.crop_pct
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(snake_case_ , default_to_square=snake_case_ )
_UpperCAmelCase = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(snake_case_ ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=snake_case_ , size=snake_case_ , crop_pct=snake_case_ , resample=snake_case_ ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
_UpperCAmelCase = {"pixel_values": images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
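
# Hedged numeric sketch of the `crop_pct` path in `resize` above: for a target
# shortest edge below 384, the image is first resized so its shortest edge is
# shortest_edge / crop_pct, then center-cropped back down to the target.
_shortest_edge = 224
_crop_pct = 224 / 256
assert int(_shortest_edge / _crop_pct) == 256  # resize to 256, then center-crop to 224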
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowercase ( self : Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self : Tuple ):
return PegasusTokenizer.from_pretrained("google/pegasus-large" )
def lowercase ( self : Union[str, Any] , **snake_case_ : Union[str, Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowercase ( self : Tuple , snake_case_ : Any ):
return ("This is a test", "This is a test")
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = "</s>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(snake_case_ ) , 1_1_0_3 )
def lowercase ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def lowercase ( self : List[Any] ):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
_UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0]
_UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowercase ( self : Tuple ):
_UpperCAmelCase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_UpperCAmelCase = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
_UpperCAmelCase = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
_UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
_UpperCAmelCase = "To ensure a smooth flow of bank resolutions."
_UpperCAmelCase = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
_UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowercase ( self : int ):
_UpperCAmelCase = ["This is going to be way too long." * 1_5_0, "short example"]
_UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"]
_UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
_UpperCAmelCase = self._large_tokenizer(
text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case_ ) == 2 # input_ids, attention_mask.
@slow
def lowercase ( self : Dict ):
# fmt: off
_UpperCAmelCase = {"input_ids": [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowercase ( self : Any ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self : Tuple ):
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
def lowercase ( self : Optional[Any] , **snake_case_ : Dict ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowercase ( self : Union[str, Any] , snake_case_ : str ):
return ("This is a test", "This is a test")
def lowercase ( self : List[str] ):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
_UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0]
_UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
@require_torch
def lowercase ( self : Tuple ):
_UpperCAmelCase = ["This is going to be way too long." * 1_0_0_0, "short example"]
_UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"]
_UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
_UpperCAmelCase = self._large_tokenizer(
text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case_ ) == 2 # input_ids, attention_mask.
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
_UpperCAmelCase = self._large_tokenizer(snake_case_ ).input_ids
self.assertListEqual(
snake_case_ , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
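
# Hedged sketch of the Pegasus id layout the asserts above rely on: ids 0 and 1
# are <pad> and </s>, the mask/unk specials sit below `offset`, and plain
# SentencePiece pieces start above it.
_offset = 103
assert _offset + 2 == 105  # unk_token_id, as checked in the large-tokenizer test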
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
        UpperCamelCase_ = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
UpperCamelCase_ = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
torch.manual_seed(0 )
UpperCamelCase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0 )
UpperCamelCase_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
UpperCamelCase_ = CLIPTextModel(lowercase_ )
UpperCamelCase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCamelCase_ = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase=0 ):
"""simple docstring"""
if str(lowercase_ ).startswith("""mps""" ):
UpperCamelCase_ = torch.manual_seed(lowercase_ )
else:
UpperCamelCase_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCamelCase_ = 2
UpperCamelCase_ = randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=lowercase_ , device=torch.device(lowercase_ ) , )
UpperCamelCase_ = floats_tensor(control_image.shape , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase_ = Image.fromarray(np.uint8(lowercase_ ) ).convert("""RGB""" ).resize((6_4, 6_4) )
UpperCamelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def lowerCamelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
        UpperCamelCase_ = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)
UpperCamelCase_ = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(lowercase_ )
torch.manual_seed(0 )
UpperCamelCase_ = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(lowercase_ )
torch.manual_seed(0 )
UpperCamelCase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0 )
UpperCamelCase_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
UpperCamelCase_ = CLIPTextModel(lowercase_ )
UpperCamelCase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCamelCase_ = MultiControlNetModel([controlneta, controlneta] )
UpperCamelCase_ = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase=0 ):
"""simple docstring"""
if str(lowercase_ ).startswith("""mps""" ):
UpperCamelCase_ = torch.manual_seed(lowercase_ )
else:
UpperCamelCase_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCamelCase_ = 2
UpperCamelCase_ = [
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=lowercase_ , device=torch.device(lowercase_ ) , ),
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=lowercase_ , device=torch.device(lowercase_ ) , ),
]
UpperCamelCase_ = floats_tensor(control_image[0].shape , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase_ = Image.fromarray(np.uint8(lowercase_ ) ).convert("""RGB""" ).resize((6_4, 6_4) )
UpperCamelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = self.pipeline_class(**lowercase_ )
pipe.to(lowercase_ )
UpperCamelCase_ = 10.0
UpperCamelCase_ = 4
UpperCamelCase_ = self.get_dummy_inputs(lowercase_ )
UpperCamelCase_ = steps
UpperCamelCase_ = scale
        output_1 = pipe(**lowercase_ )[0]
UpperCamelCase_ = self.get_dummy_inputs(lowercase_ )
UpperCamelCase_ = steps
UpperCamelCase_ = scale
        output_2 = pipe(**lowercase_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
UpperCamelCase_ = self.get_dummy_inputs(lowercase_ )
UpperCamelCase_ = steps
UpperCamelCase_ = scale
        output_3 = pipe(**lowercase_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
UpperCamelCase_ = self.get_dummy_inputs(lowercase_ )
UpperCamelCase_ = steps
UpperCamelCase_ = scale
        output_4 = pipe(**lowercase_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass( self ):
        """simple docstring"""
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
    def test_save_pretrained_raise_not_implemented_exception( self ):
        """simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_canny( self ):
        """simple docstring"""
        controlnet = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        prompt = "evil space-punk bird"
        control_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((5_1_2, 5_1_2) )
        image = load_image(
            """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((5_1_2, 5_1_2) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type="""np""" , num_inference_steps=5_0 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
        assert np.abs(expected_image - image ).max() < 9e-2
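# Illustrative note (added): control_guidance_start/control_guidance_end restrict each
# ControlNet to a window of the denoising schedule; a scalar applies one window to all
# controlnets, while a list supplies one window per controlnet, as the fast tests
# above exercise.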
| 351 |
def lowerCamelCase__ ( a__ : list ) -> list:
    cur = len(a__ )
    while cur > 1:
        # Find the index of the maximum number in a__[0:cur]
        mi = a__.index(max(a__[0:cur] ) )
        # Reverse from 0 to mi
        a__ = a__[mi::-1] + a__[mi + 1 : len(a__ )]
        # Reverse whole list
        a__ = a__[cur - 1 :: -1] + a__[cur : len(a__ )]
        cur -= 1
    return a__
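# Added sanity check (illustrative): the flip-based sort above should agree with
# Python's built-in sorted() on a small example.
assert lowerCamelCase__([3, 1, 5, 2, 4] ) == [1, 2, 3, 4, 5]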
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(lowerCamelCase__(unsorted))
| 261 | 0 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params( module ):
    for param in module.parameters():
        param.requires_grad = False
def get_device( ):
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            """WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch"""
            """ errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues"""
            """ with generations.""" )
    return device
def show_image( image ):
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp( ):
    current_time = datetime.now()
    timestamp = current_time.strftime("""%H:%M:%S""" )
    return timestamp
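# Illustrative usage (added; the descriptive helper names above were restored
# during cleanup of the collapsed identifiers):
if __name__ == "__main__":
    print(f"[{get_timestamp()}] selected device: {get_device()}")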
| 287 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def test_from_dir( files , tmp_path_factory ):
    dataset_infos_dir = tmp_path_factory.mktemp('''dset_infos_dir''' )
    if "full:README.md" in files:
        with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
            f.write('''---\ndataset_info:\n  dataset_size: 42\n---''' )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
            f.write('''''' )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
            f.write('''{"default": {"dataset_size": 42}}''' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload( dataset_info , tmp_path ):
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , '''dataset_info.json''' ) )
def test_dataset_info_to_yaml_dict( ):
    dataset_info = DatasetInfo(
        description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=13_37 , post_processing_size=4_42 , dataset_size=12_34 , size_in_bytes=13_37 + 4_42 + 12_34 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty( ):
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload( dataset_infos_dict , tmp_path ):
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , '''README.md''' ) )
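# Standalone sketch (added, illustrative): the YAML round-trip these tests rely on,
# runnable outside pytest. `_to_yaml_dict` is the same private helper used above.
if __name__ == "__main__":
    _info = DatasetInfo(description='''demo''' , dataset_size=42 )
    print(yaml.safe_load(yaml.safe_dump(_info._to_yaml_dict() ) ) )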
| 121 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester( ConfigTester ):
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , 'tf_padding' ) )
        self.parent.assertTrue(hasattr(config , 'depth_multiplier' ) )
class MobileNetVaModelTester:
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , min_depth=8 , tf_padding=True , last_hidden_size=10_24 , output_stride=32 , hidden_act="relu6" , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier )
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
        return MobileNetVaConfig(
            num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='MobileNetV1 does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason='MobileNetV1 does not support input and output embeddings' )
    def test_model_common_attributes( self ):
        pass
    @unittest.skip(reason='MobileNetV1 does not output attentions' )
    def test_attention_outputs( self ):
        pass
    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states ) , expected_num_stages )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224' ) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_01) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 370 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
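# Usage sketch (added, illustrative; the script file name below is a placeholder,
# flags are as defined above):
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variations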
| 96 | 0 |
import unittest
import numpy as np
def schur_complement( mat_a , mat_b , mat_c , pseudo_inv = None , ):
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            """Expected the same number of rows for A and B. """
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            """Expected the same number of columns for B and C. """
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                '''Input matrix A is not invertible. Cannot compute Schur complement.''' )
    return mat_c - mat_b.T @ a_inv @ mat_b
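# Usage sketch (added, illustrative): for the block matrix M = [[A, B], [B.T, C]],
# the identity det(M) == det(A) * det(S) holds, where S is the Schur complement
# returned above.
def _schur_demo() -> None:
    a = np.array([[2.0, 0.0], [0.0, 3.0]] )
    b = np.array([[1.0], [1.0]] )
    c = np.array([[4.0]] )
    s = schur_complement(a , b , c )
    m = np.block([[a, b], [b.T, c]] )
    assert abs(np.linalg.det(m ) - np.linalg.det(a ) * np.linalg.det(s ) ) < 1e-9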
class A ( unittest.TestCase ):
    def test_schur_complement( self ):
        """simple docstring"""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a, b, c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x, det_a * det_s )
    def test_improper_a_b_dimensions( self ):
        """simple docstring"""
        # b deliberately has fewer rows than a, so the row-count check must raise.
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a, b, c )
    def test_improper_b_c_dimensions( self ):
        """simple docstring"""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a, b, c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 278 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
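# Illustrative sketch (added): the lazy import above can be approximated with the
# stdlib only, via PEP 562's module-level __getattr__, e.g.:
#
#   import importlib
#   def __getattr__(name):
#       return getattr(importlib.import_module(".modeling_rag", __package__), name)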
| 124 | 0 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort( sequence : list ) -> list:
    '''simple docstring'''
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'The elements inside the sequence must contains only {colors} values'
            raise ValueError(msg )
    return sequence
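# Added sanity check (illustrative): one pass orders the 0/1/2 color values.
assert dutch_national_flag_sort([2, 0, 1, 2, 0] ) == [0, 0, 1, 2, 2]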
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"""{dutch_national_flag_sort(unsorted)}""")
| 353 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class __lowercase ( PreTrainedTokenizer ):
    """simple docstring"""
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs = None , **kwargs , ) -> None:
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs if hasattr(self , "sp_model_kwargs" ) else sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
                """See https://pypi.org/project/jieba/ for installation.""" )
        self.jieba = jieba
        self.translator = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.sp_model )
    def get_vocab( self ) -> Dict:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        '''simple docstring'''
        if self.remove_space:
            outputs = """ """.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("""NFKD""" , outputs )
            outputs = """""".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text ) -> List[str]:
        '''simple docstring'''
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , """""" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def _decode( self , *args , **kwargs ):
        '''simple docstring'''
        text = super()._decode(*args , **kwargs )
        text = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
        return text
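# Illustrative round trip (added): CPM stores spaces/newlines as the placeholder
# characters built by `self.translator` above; `_decode` reverses that mapping.
if __name__ == "__main__":
    _encoded = "hi there".translate(str.maketrans(""" \n""" , """\u2582\u2583""" ) )
    print(_encoded.replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" ) )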
| 66 | 0 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """simple docstring"""
    dataset_name: Optional[str] = field(
        default="""tab_fact""", metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
    dataset_config_name: Optional[str] = field(
        default="""tab_fact""", metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}, )
    max_seq_length: int = field(
        default=1024, metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    pad_to_max_length: bool = field(
        default=False, metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        }, )
    train_file: Optional[str] = field(
        default=None, metadata={"""help""": """A csv or a json file containing the training data."""} )
    validation_file: Optional[str] = field(
        default=None, metadata={"""help""": """A csv or a json file containing the validation data."""} )
    test_file: Optional[str] = field(default=None, metadata={"""help""": """A csv or a json file containing the test data."""} )
    def __post_init__( self ):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
        else:
            train_extension = self.train_file.split('.' )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('.' )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """simple docstring"""
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""}, )
    model_revision: str = field(
        default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}, )
    use_auth_token: bool = field(
        default=False, metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        }, )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
    logger.info(f"Training/evaluation parameters {training_args}" )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {'train': data_args.train_file, 'validation': data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split('.' )[-1]
                test_extension = data_args.test_file.split('.' )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files['test'] = data_args.test_file
            else:
                raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}" )
        if data_args.train_file.endswith('.csv' ):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset('csv' , data_files=data_files , cache_dir=model_args.cache_dir )
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset('json' , data_files=data_files , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets['train'].features['label'].names
    num_labels = len(label_list )
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=True , )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {'Refused': 0, 'Entailed': 1}
    model.config.id2label = {0: 'Refused', 1: 'Entailed'}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
    max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
    def preprocess_tabfact_function(examples ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text ):
            _table_content = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
            _table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd
        questions = examples['statement']
        tables = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
        result = tokenizer(tables , questions , padding=padding , max_length=max_seq_length , truncation=True )
        result['label'] = examples['label']
        return result
    with training_args.main_process_first(desc='dataset map pre-processing' ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples ) )
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples ) )
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError('--do_predict requires a test dataset' )
        predict_dataset = raw_datasets['test']
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples ) )
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
    if training_args.do_predict:
        logger.info('*** Predict ***' )
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns('label' )
        predictions = trainer.predict(predict_dataset , metric_key_prefix='predict' ).predictions
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
        if trainer.is_world_process_zero():
            with open(output_predict_file , 'w' ) as writer:
                logger.info('***** Predict Results *****' )
                writer.write('index\tprediction\n' )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n" )
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
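# Illustrative note (added): TabFact serializes each table as '#'-separated rows;
# _convert_table_text_to_pandas above rebuilds a DataFrame from that format, e.g.:
#   rows = [r.split("#") for r in "col_a#col_b\n1#2".strip("\n").split("\n")]
#   pd.DataFrame.from_records(rows[1:], columns=rows[0])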
| 224 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 224 | 1 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    '''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , ROBERTA_START_DOCSTRING , )
class DeeRobertaModel (DeeBertModel ):
    config_class = RobertaConfig
    base_model_prefix = '''roberta'''
    def __init__( self , config ):
        super().__init__(config )
        self.embeddings = RobertaEmbeddings(config )
        self.init_weights()
@add_start_docstrings(
    '''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. ''' , ROBERTA_START_DOCSTRING , )
class DeeRobertaForSequenceClassification (BertPreTrainedModel ):
    config_class = RobertaConfig
    base_model_prefix = '''roberta'''
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , self.config.num_labels )
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , output_layer=-1 , train_highway=False , ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output )
            logits = self.classifier(pooled_output )
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits )
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(highway_loss )
            if train_highway:
                outputs = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
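# Illustrative note (added): at inference time the DeeBERT/DeeRoBERTa highway
# layers raise HighwayException once a layer's entropy drops below its threshold,
# which the `except HighwayException` branch above catches; a minimal gate inside
# a highway layer looks roughly like (names assumed from modeling_highway_bert):
#   if entropy(highway_logits) < threshold:
#       raise HighwayException(highway_exit, layer_index + 1)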
| 352 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig (PretrainedConfig ):
    model_type = '''longformer'''
    def __init__( self , attention_window: Union[List[int], int] = 512 , sep_token_id: int = 2 , pad_token_id: int = 1 , bos_token_id: int = 0 , eos_token_id: int = 2 , vocab_size: int = 30_522 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3_072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 512 , type_vocab_size: int = 2 , initializer_range: float = 0.0_2 , layer_norm_eps: float = 1e-12 , onnx_export: bool = False , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig (OnnxConfig ):
    def __init__( self , config: "PretrainedConfig" , task: str = "default" , patching_specs: "List[PatchingSpec]" = None ):
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ] )
    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs
    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
    @property
    def default_onnx_opset( self ) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset , 14 )
    def generate_dummy_inputs( self , tokenizer: "PreTrainedTokenizerBase" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"] )
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
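# Usage sketch (added, illustrative; API names follow transformers.onnx and may
# differ across versions, so this is left as a commented outline):
#   from pathlib import Path
#   from transformers import AutoTokenizer, LongformerModel
#   from transformers.onnx import export
#   model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
#   tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
#   onnx_config = LongformerOnnxConfig(model.config)
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("longformer.onnx"))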
| 253 | 0 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_250_movies( url: str = "" ) -> dict[str, float]:
    url = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    titles = soup.find_all('''td''' , attrs='''titleColumn''' )
    ratings = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }
def write_movies( filename: str = "IMDb_Top_250_Movies.csv" ) -> None:
    movies = get_imdb_top_250_movies()
    with open(filename , '''w''' , newline='''''' ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(['''Movie title''', '''IMDb rating'''] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 16 |
"""simple docstring"""
def solution( a = 1_0_0_0 ):
    # Project Euler problem 1: sum all multiples of 3 or 5 strictly below `a`.
    __num = 3
    __result = 0
    while __num < a:
        if __num % 3 == 0 or __num % 5 == 0:
            __result += __num
        __num += 1
    return __result
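# Cross-check (added, illustrative): the same sum via inclusion-exclusion, using
# the closed form k * m * (m + 1) / 2 with m = (a - 1) // k.
def _closed_form( a = 1_0_0_0 ):
    def tri( k ):
        m = (a - 1) // k
        return k * m * (m + 1) // 2
    return tri(3 ) + tri(5 ) - tri(15 )
assert _closed_form() == solution()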
if __name__ == "__main__":
print(F'''{solution() = }''')
| 261 | 0 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder( CLIPPreTrainedModel ):
    """simple docstring"""
    def __init__( self , config , proj_size=7_6_8 ):
        super().__init__(config )
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config )
        self.mapper = PaintByExampleMapper(config )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size )
        self.proj_out = nn.Linear(config.hidden_size , self.proj_size )
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
    def forward( self , pixel_values , return_uncond_vector=False ):
        clip_output = self.model(pixel_values=pixel_values )
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None] )
        latent_states = self.final_layer_norm(latent_states )
        latent_states = self.proj_out(latent_states )
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper( nn.Module ):
    """simple docstring"""
    def __init__( self , config ):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size , num_heads , hid_size // num_heads , activation_fn='''gelu''' , attention_bias=True )
                for _ in range(num_layers )
            ] )
    def forward( self , hidden_states ):
        for block in self.blocks:
            hidden_states = block(hidden_states )
        return hidden_states
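# Illustrative note (added): the mapper receives CLIP's pooled image embedding as
# (batch, 1, hidden_size) -- via the latent_states[:, None] unsqueeze above -- and
# passes it through (num_hidden_layers + 1) // 5 transformer blocks, leaving the
# tensor shape unchanged.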
| 356 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
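# Each line of the original entity vocab file is a JSON object like (illustrative):
#   {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}
# which the loop above maps to {"en:Japan": 3, "ja:日本": 3}; special tokens keep their bare name.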
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 326 | 0 |
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
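# Example (illustrative): "encoder.model.layers.0.blocks.1.attn.qkv.weight" splits so that
# key_split[3] == "0" (layer) and key_split[5] == "1" (block); the fused qkv tensor is then
# sliced into query/key/value chunks of `dim` rows each.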
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    decoder_input_ids = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, decoder_input_ids, None).logits
    logits = model(pixel_values, decoder_input_ids=decoder_input_ids).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
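# Example invocation (script name and paths illustrative):
#   python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base \
#       --pytorch_dump_folder_path ./donut-base-converted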
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""naver-clova-ix/donut-base-finetuned-docvqa""",
required=False,
type=str,
help="""Name of the original model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
required=False,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub.""",
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 40 |
"""simple docstring"""
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x <= (b - h) + 1e-12:  # small epsilon so float round-off doesn't drop the last interior point
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
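# Sanity check: the exact value of the integral of x**2 over [0, 1] is 1/3; with 10 steps the
# composite trapezoidal rule above prints y = 0.335, and the O(h**2) error shrinks as steps grows.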
if __name__ == "__main__":
main()
| 96 | 0 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"
if is_tf_available():
    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
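# NewModelConfig / TFNewModel are throwaway helpers used by the registration test below
# to exercise the AutoConfig / TFAutoModel registration API.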
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
@slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)

        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
| 211 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
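# Illustrative usage (names assumed): the ONNX exporter consumes these dynamic axes, e.g.
#   XLMRobertaXLOnnxConfig(XLMRobertaXLConfig(), task="default").inputs
#   -> OrderedDict([("input_ids", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"})])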
| 211 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
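# Pure deprecation shim: it behaves exactly like PoolFormerImageProcessor and only exists so
# that code importing the old feature-extractor name keeps working until v5.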
| 23 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path) -> None:
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 150 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
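# Background: rougeLsum is computed sentence-by-sentence, so joining texts with newlines
# (newline_sep=True) changes its value while rouge1/rouge2/rougeL are unaffected; that is
# exactly what the two newline tests above assert.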
| 150 | 1 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel

logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a (possibly unnormalized) distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Print a 2D tensor, one logged row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and importance scores as in Michel et al.
    (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) based on importance scores until the LM score drops
    below args.masking_threshold of the original, as in Michel et al."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (physically remove their weights) based on the head mask,
    then compare score and speed against masking."""
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
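# Overall flow: score every attention head (entropy + gradient-based importance), then
# iteratively zero out the least important heads until the LM score falls below
# masking_threshold * original, and finally prune the masked heads and measure the speedup.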
| 106 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 253 | 0 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
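# Euclid's formula: for coprime m > n of opposite parity, (m*m - n*n, 2*m*n, m*m + n*n) is a
# primitive Pythagorean triple with perimeter 2*m*(m + n); counting all multiples of each
# primitive perimeter and keeping values hit exactly once answers Project Euler problem 75.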
if __name__ == "__main__":
print(F'''{solution() = }''')
| 118 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
| 118 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 256 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
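
# For reference: `load_original_entity_vocab` above assumes each line of the entity vocab file is
# a JSON object shaped like {"id": <int>, "entities": [[<name>, <language>], ...]} — an
# illustrative schema inferred from the parsing code, not an official specification.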
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 326 | 0 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingTester(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 141 |
from typing import Any
def mode(input_list: list) -> list[Any]:
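    """Return the mode(s) of ``input_list`` in sorted order.

    >>> mode([2, 2, 3])
    [2]
    >>> mode([3, 4, 5])
    [3, 4, 5]
    """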
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
| 141 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
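
# The index written above follows the sharded-checkpoint layout `from_pretrained` understands:
# a "metadata" block with the total byte size plus a "weight_map" from parameter name to shard file.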
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 211 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 211 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
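
# The helper above fills in default masks (1 for real tokens, 0 for padding) and all-ones head
# masks, mirroring what the model would otherwise build internally when these inputs are omitted.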
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)

            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented for LED
        pass


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 369 |
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings, appending the tail of the longer one."""
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
| 340 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["ChineseCLIPFeatureExtractor"]
SCREAMING_SNAKE_CASE__ = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 150 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR: every character of the input is its own token."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
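
# Illustrative usage (assumes a local vocab.json mapping single characters to ids):
#   tokenizer = MgpstrTokenizer("vocab.json")
#   tokenizer.tokenize("abc")  # -> ["a", "b", "c"], one token per character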
| 150 | 1 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(taken: int = 20) -> str:
    """Return the expected number of distinct colours among ``taken`` randomly drawn balls."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
| 102 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=512,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 102 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A : List[Any] = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 118 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
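    """Return ``num!``, computed recursively and memoised via ``lru_cache``.

    >>> factorial(5)
    120
    >>> factorial(0)
    1
    """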
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 118 | 1 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to ``max_perimeter``, the right triangles with integral sides."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    """Return the perimeter p <= n with the greatest number of right-triangle solutions."""
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]
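
# `Counter.most_common(1)` gives the (perimeter, count) pair with the largest count, so [0][0]
# extracts the perimeter itself with the most right-triangle solutions.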
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
| 359 |
import requests
__A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> None:
"""simple docstring"""
__lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 348 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
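
    # Note: `hint` plays the role of the ControlNet conditioning image (e.g. a depth map), laid
    # out as (batch, channels, height, width) with values in [0, 1].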
    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 141 |
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning (closest) weight vector for a sample by squared distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move the winning vector ``j`` toward the sample by learning rate ``alpha``."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
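
# Competitive-learning update: the winning prototype w_j moves toward the sample x via
# w_j <- w_j + alpha * (x - w_j), so each prototype drifts toward the inputs it wins.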
def main() -> None:
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
| 141 | 1 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
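# Example invocation (illustrative; flag values are assumptions): convert only the
# canonical BERT checkpoints and keep the produced tokenizer.json files:
#     python convert_slow_tokenizers_checkpoints_to_fast.py \
#         --tokenizer_name BertTokenizer --dump_path ./dumped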
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 359 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
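# Note (illustrative): the defaults built above mirror what the model would infer
# on its own -- the attention masks hide pad tokens, and the all-ones head masks
# keep every attention head active in both the encoder and the decoder.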
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
    expected_text = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__UpperCamelCase : Union[str, Any] = """google/pegasus-xsum"""
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 223 | 0 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
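# Shape note (illustrative): decimal_to_bits maps a (b, c, h, w) image in [0, 1]
# to a (b, c * bits, h, w) tensor with values in {-1.0, 1.0}; bits_to_decimal
# below inverts the mapping, so for x = torch.rand(1, 3, 8, 8),
# bits_to_decimal(decimal_to_bits(x)) keeps x.shape (values are 8-bit quantized).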
def bits_to_decimal(x, bits=BITS):
    """Expects bits from -1 to 1, outputs image tensor from 0 to 1."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
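# Aside from clamping the predicted x_0 to [-bit_scale, bit_scale] instead of the
# usual [-1, 1], the two step functions above follow the stock DDIM/DDPM update
# rules, which is why they can be swapped onto the scheduler instance (see the
# __init__ of the pipeline below) without touching the rest of the sampling loop.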
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        """simple docstring"""
        super().__init__()
        self.bit_scale = bit_scale
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """simple docstring"""
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
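# Usage sketch (illustrative; `my_unet` is a hypothetical trained model):
#     pipe = BitDiffusion(unet=my_unet, scheduler=DDIMScheduler(), bit_scale=1.0)
#     image = pipe(height=64, width=64, num_inference_steps=10).images[0]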
| 24 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
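# fire.Fire maps the function arguments to CLI flags, so an invocation looks like
# (illustrative paths):
#     python save_len_file.py --tokenizer_name facebook/bart-large --data_dir ./cnn_dm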
| 340 | 0 |
"""simple docstring"""
import math
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> int:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_lowercase) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def SCREAMING_SNAKE_CASE ( __UpperCamelCase = 1_00_01) -> Optional[Any]:
try:
a = int(_lowercase)
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int.") from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one.")
a = []
a = 2
while len(_lowercase) < nth:
if is_prime(_lowercase):
primes.append(_lowercase)
num += 1
else:
num += 1
return primes[len(_lowercase) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
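# Quick sanity checks (illustrative): the 6th prime is 13, and solution() with the
# default nth=10001 yields 104743, the answer to Project Euler problem 7.
#     assert solution(6) == 13
#     assert solution() == 104743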
| 359 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowercase__ : int = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowercase__ : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowercase__ : Tuple = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 180 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
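# Note (illustrative): pinning a 3x3 slice of the final hidden state with
# atol=1e-4 is the usual guard against silent numerical drift when a TF port is
# checked against the reference PyTorch weights.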
| 102 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
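# Note (illustrative): pairing a custom config (registered via AutoConfig.register)
# with a model class (registered via the TFAuto* classes) is what lets the auto
# factories resolve the "new-model" type; test_new_model_registration below
# exercises that round trip, including cleanup of the shared registries.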
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)

    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)

        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            model = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
| 102 | 1 |
"""simple docstring"""
import numpy
# List of input, output pairs
__lowercase = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
__lowercase = (((515, 22, 13), 555), ((61, 35, 49), 150))
__lowercase = [2, 4, 1, 5]
__lowercase = len(train_data)
__lowercase = 0.0_09
def lowerCAmelCase (__UpperCamelCase : Union[str, Any] , __UpperCamelCase : str="train" ):
"""simple docstring"""
return calculate_hypothesis_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) - output(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase (__UpperCamelCase : Optional[int] ):
"""simple docstring"""
__UpperCamelCase : str =0
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def lowerCAmelCase (__UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] ):
"""simple docstring"""
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def lowerCAmelCase (__UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def lowerCAmelCase (__UpperCamelCase : Dict , __UpperCamelCase : Any=m ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =0
for i in range(SCREAMING_SNAKE_CASE_ ):
if index == -1:
summation_value += _error(SCREAMING_SNAKE_CASE_ )
else:
summation_value += _error(SCREAMING_SNAKE_CASE_ ) * train_data[i][0][index]
return summation_value
def lowerCAmelCase (__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
__UpperCamelCase : Tuple =summation_of_cost_derivative(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) / m
return cost_derivative_value
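# The function above implements the batch-gradient formula (illustrative note):
#     dJ/dtheta_i = (1 / m) * sum_k (h(x_k) - y_k) * x_{k,i}
# where index -1 stands in for the bias term, whose "feature" is the constant 1.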
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 363 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
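# Note (illustrative): _LazyModule defers the heavy torch imports until a symbol
# from _import_structure is first accessed; the eager imports under TYPE_CHECKING
# exist only so static type checkers can see the real classes.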
| 85 | 0 |
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__snake_case = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}
__snake_case = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__snake_case = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__snake_case = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__snake_case = 2
__snake_case = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__snake_case = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__snake_case = None
| 97 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
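    # Editor-added note: with the _LazyModule registered above, submodule imports (and
    # the heavy torch import behind them) are deferred until an attribute such as
    # `BloomModel` is first accessed, keeping `import transformers` itself fast.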
| 348 | 0 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 73 |
"""simple docstring"""
def pancake_sort(arr: list) -> list:
    """Sort a list by repeatedly flipping prefixes (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing that maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix, sending the maximum to its final slot
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
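# Editor-added walk-through (not from the original file), starting from [3, 1, 2]:
# the first pass finds the max 3 at index 0, so the first flip is a no-op, and flipping
# the whole prefix gives [2, 1, 3]; the second pass yields [1, 2, 3].
# pancake_sort([3, 1, 2]) -> [1, 2, 3]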
| 73 | 1 |
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """
    Apply Coulomb's law F = k * q1 * q2 / d^2: given any three of force, charge1,
    charge2 and distance (with the unknown passed as 0), solve for the fourth.
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
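# Editor-added example (not from the original file): two 1 C charges 1 m apart.
#   coulombs_law(force=0, charge1=1, charge2=1, distance=1) -> {"force": 8.988e9}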
if __name__ == "__main__":
import doctest
doctest.testmod()
| 262 |
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 223 | 0 |
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply Z = sqrt(R^2 + X^2): given two of resistance, reactance and impedance
    (with the unknown passed as 0), solve for the third.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
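# Editor-added example (not from the original file): the 3-4-5 right triangle of
# R = 3 ohm and X = 4 ohm gives |Z| = 5 ohm:
#   electrical_impedance(3, 4, 0) -> {"impedance": 5.0}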
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367 |
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][
                "weight"
            ] = t5x_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name][
            "global_relative_attention_bias"
        ]["embedding"] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
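    # Editor-added example invocation (paths are placeholders, not from the original file):
    #   python convert_t5x_checkpoint_to_flax.py \
    #       --t5x_checkpoint_path /path/to/t5x_checkpoint \
    #       --config_name google/long-t5-local-base \
    #       --flax_dump_folder_path ./flax_model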
| 231 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        # caller accumulates manually, so scale the loss and use plain autograd
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
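# Editor-added note (launcher command assumed, not from the original file): run this
# script under a distributed launcher to exercise the MULTI_GPU branches, e.g.
#   torchrun --nproc_per_node=2 test_sync.py
# A plain `python test_sync.py` only hits the DistributedType.NO paths.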
| 71 |
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
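    # Editor-added usage sketch (mirrors the checkpoint used by the tests below):
    #   tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
    #   tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us")
    #   -> "h ə l oʊ h aʊ ɑːɹ j uː"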
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)
    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")
    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)
    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")
    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])
    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)
    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)
    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")
    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"

        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)
    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_a = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )

            self.assertListEqual(outputs_batch["text"], outputs_batch_a["text"])

            def recursive_check(list_or_dict_a, list_or_dict_b):
                if isinstance(list_or_dict_a, list):
                    [recursive_check(la, lb) for la, lb in zip(list_or_dict_a, list_or_dict_b)]
                self.assertEqual(list_or_dict_a, list_or_dict_b)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_a["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
    def test_added_tokens_do_lower_case(self):
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
    def test_encode_decode_with_spaces(self):
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
    def test_internal_consistency(self):
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
    def test_pretrained_model_lists(self):
pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_tf_encode_plus_sent_to_model(self):
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
    def test_torch_encode_plus_sent_to_model(self):
pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(output["text"], str)
| 180 | 0 |
def solution() -> int:
    """
    Project Euler 9: return the product a*b*c of the unique Pythagorean triplet
    with a + b + c = 1000.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
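# Editor-added note: the unique triple is (a, b, c) = (200, 375, 425), since
# 200**2 + 375**2 == 425**2 and 200 + 375 + 425 == 1000, so solution() == 31875000.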
if __name__ == "__main__":
print(f"""{solution() = }""")
| 140 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the ITU-R 601-2 luma transform."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a boolean (binary) image."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image with the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
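# Editor-added mini-example (not from the original file): dilating a single set pixel
# with the cross-shaped 3x3 kernel used below grows it into a plus sign.
#   img = np.zeros((3, 3)); img[1, 1] = 1
#   dilation(img, np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
#   -> [[0, 1, 0],
#       [1, 1, 1],
#       [0, 1, 0]]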
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 140 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
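    # Editor-added example invocation (dump path is a placeholder):
    #   python convert_table_transformer_checkpoint.py \
    #       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
    #       --pytorch_dump_folder_path ./table-transformer-detection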
| 100 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class _snake_case ( lowercase_ ):
    model_type = "upernet"

    def __init__( self , backbone_config=None , hidden_size=512 , initializer_range=0.0_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=384 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , **kwargs , ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict( self ) -> Optional[Any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
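

if __name__ == "__main__":
    # Quick smoke test (a sketch; this name-mangled module mirrors
    # transformers' UperNetConfig, so the released class is used here for
    # readability): with no backbone_config, a ResNet backbone is assumed.
    from transformers import UperNetConfig

    config = UperNetConfig()
    print(config.backbone_config.model_type)  # "resnet"
    print(config.to_dict()["model_type"])  # "upernet"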
| 85 | 0 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook( method ):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse('''0.17.0''' ):
        return method

    def wrapper(self , *args , **kwargs ):
        if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )

    return wrapper
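

if __name__ == "__main__":
    # Minimal usage sketch: decorate an instance method so that, when
    # accelerate has attached an offload hook as `_hf_hook`, pre_forward runs
    # first. Without a hook (or without accelerate) the call passes through.
    class _Toy:
        @apply_forward_hook
        def encode(self , x ):
            return x * 2

    print(_Toy().encode(3 ))  # 6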
| 228 |
'''simple docstring'''
def solution( max_base = 10 , max_power = 22 ) -> int:
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f"""{solution(10, 22) = }""")
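    # For intuition, a few members of the counted sequence - n-digit numbers
    # that are also n-th powers (only bases below 10 can ever qualify, since
    # 10**n already has n + 1 digits):
    for base, power in [(7, 5), (8, 9), (9, 21)]:
        assert len(str(base**power ) ) == power
    print("e.g. 7**5 =", 7**5, "has 5 digits")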
| 228 | 1 |
import copy
import os
import cv2 as cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__( self ):
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 2_5_6
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch( self , input_file ):
        self.img = cva.imread(input_file , 0)
        self.original_image = copy.deepcopy(self.img)
        x , _ , _ = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label='x')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite('output_data/output.jpg' , self.img)

    def plot_histogram( self ):
        plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6])

    def show_image( self ):
        cva.imshow('Output-Image' , self.img)
        cva.imshow('Input-Image' , self.original_image)
        cva.waitKey(5_0_0_0)
        cva.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
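    # For comparison (a sketch): OpenCV ships the same global histogram
    # equalization that the class above computes by hand as a single call.
    equalized = cva.equalizeHist(cva.imread(file_path , 0))
    cva.imwrite('output_data/opencv_output.jpg' , equalized)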
| 73 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a ={
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)


class A_ ( PretrainedConfig ):
    model_type = '''mask2former'''
    backbones_supported = ['''swin''']
    attribute_map = {'''hidden_size''': '''hidden_dim'''}

    def __init__( self , backbone_config = None , feature_size = 2_5_6 , mask_feature_size = 2_5_6 , hidden_dim = 2_5_6 , encoder_feedforward_dim = 1_0_2_4 , activation_function = "relu" , encoder_layers = 6 , decoder_layers = 1_0 , num_attention_heads = 8 , dropout = 0.0 , dim_feedforward = 2_0_4_8 , pre_norm = False , enforce_input_projection = False , common_stride = 4 , ignore_value = 2_5_5 , num_queries = 1_0_0 , no_object_weight = 0.1 , class_weight = 2.0 , mask_weight = 5.0 , dice_weight = 5.0 , train_num_points = 1_2_5_4_4 , oversample_ratio = 3.0 , importance_sample_ratio = 0.75 , init_std = 0.02 , init_xavier_std = 1.0 , use_auxiliary_loss = True , feature_strides = [4, 8, 1_6, 3_2] , output_auxiliary_logits = None , **kwargs , ):
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.')
            backbone_config = CONFIG_MAPPING['swin'](
                image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                F"Supported model types: {','.join(self.backbones_supported)}")

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )

    @classmethod
    def from_backbone_config( cls , backbone_config: PretrainedConfig , **kwargs ):
        return cls(
            backbone_config=backbone_config , **kwargs , )

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
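

if __name__ == "__main__":
    # Sketch: building the config from an explicit Swin backbone. The released
    # transformers names are used here for readability; this module mirrors
    # Mask2FormerConfig.
    from transformers import Mask2FormerConfig, SwinConfig

    backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
    config = Mask2FormerConfig.from_backbone_config(backbone_config=backbone)
    print(config.backbone_config.model_type)  # "swin"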
| 73 | 1 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2 as load_gpta,
    recopy_gpt2 as recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel as GPTaLMHeadModel
def generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , ):
    """simple docstring"""
    set_seed(3 )
    # generate train_data and objective_set
    train_data , objective_set = generate_datasets(
        context_len , data_file , number=size_objective_set , min_len=1026 , trim=trim )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
    # load pretrained model
    model = load_gpta('''gpt2''' ).to(device )
    print('''computing perplexity on objective set''' )
    orig_perp = compute_perplexity(model , objective_set , context_len ).item()
    print('''perplexity on objective set:''' , orig_perp )
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model , orig_perp , context_len , train_data , objective_set , max_steps , device , igf_data_file )
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner( secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , ):
    """simple docstring"""
    set_seed(42 )
    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained('''gpt2''' )
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model )
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner , secondary_learner_train_data , max_epochs=secondary_learner_max_epochs , batch_size=secondary_learner_batch_size , eval_freq=100 , igf_model_path=igf_model_path , )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune( model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=None , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , ):
    """simple docstring"""
    device = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
    train_sampler = RandomSampler(train_dataset )
    train_dataloader = DataLoader(train_dataset , sampler=train_sampler )

    num_train_epochs = max_steps // (len(train_dataset )) + 1
    global_step = 0

    context = torch.zeros((1, context_len) , dtype=torch.long , device=device )
    model , lm_optimizer , lm_scheduler = recopy_model(model , device , max_steps )

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device )
        secondary_learner.eval()

    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model , test_dataset , context_len )
    test_perps.append(real_perp )
    print('''Test perplexity, step''' , global_step , ''':''' , real_perp )
    for epoch in range(int(num_train_epochs ) ):
        for step, example in enumerate(train_dataloader ):
            torch.cuda.empty_cache()
            start = random.randint(0 , example.size(2 ) - context_len - 1 )
            context = example[0, 0, start : start + context_len]

            lm_optimizer.zero_grad()
            outputs = model(context , labels=context )
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context , dtype=torch.long , device=device ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(predicted_q ) )

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.

                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu() ) )
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1

                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    test_perp = compute_perplexity(model , test_dataset , context_len )
                    test_perps.append(test_perp )
                    print('''Test perplexity, step''' , global_step , ''':''' , test_perp )

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict() , finetuned_model_name )
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=UpperCAmelCase_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=100 , type=UpperCAmelCase_ , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=100 , type=UpperCAmelCase_ , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=1000 , type=UpperCAmelCase_ , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=128 , type=UpperCAmelCase_ , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=UpperCAmelCase_ , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=UpperCAmelCase_ , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=100 , type=UpperCAmelCase_ , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=1026 , type=UpperCAmelCase_ , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=UpperCAmelCase_ , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=UpperCAmelCase_ , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=UpperCAmelCase_ , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load('''data/IGF_values.jbl''' )

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )

    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained('''gpt2''' )
    set_seed(42 )

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset , test_dataset = generate_datasets(
        context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1026 , trim=True )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=secondary_learner , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
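

def _toy_igf_filter(predicted_igs , threshold=1.0 , decay_step=10 ):
    """Illustration only (not part of the original script): the selection rule
    finetune() applies, in isolation. A context joins the backprop batch while
    the secondary learner's predicted IG(X) clears the threshold, which is
    dropped to -1 after `decay_step` global steps so later batches accept all.
    """
    kept = []
    for step, predicted_q in enumerate(predicted_igs ):
        if step == decay_step:
            threshold = -1.0
        if predicted_q >= threshold:
            kept.append((step, predicted_q) )
    return kept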
if __name__ == "__main__":
main()
| 281 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : List[str] = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class _snake_case ( PretrainedConfig ):
    model_type = '''efficientnet'''

    def __init__( self , num_channels = 3 , image_size = 600 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [32, 16, 24, 40, 80, 112, 192] , out_channels = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.25 , hidden_act = "swish" , hidden_dim = 2560 , pooling_type = "mean" , initializer_range = 0.02 , batch_norm_eps = 0.001 , batch_norm_momentum = 0.99 , dropout_rate = 0.5 , drop_connect_rate = 0.2 , **kwargs , ):
        super().__init__(**kwargs )

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class _a ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs( self ):
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def atol_for_validation( self ):
        return 1e-5
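

if __name__ == "__main__":
    # Quick check of the derived field (a sketch, via the released transformers
    # class this module mirrors): num_hidden_layers is the sum of the
    # per-stage block repeats times four.
    from transformers import EfficientNetConfig

    config = EfficientNetConfig()
    print(config.num_hidden_layers)  # 64 == sum([1, 2, 2, 3, 3, 4, 1]) * 4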
| 281 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _a ( unittest.TestCase ):
    def test_input_types(self ):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        constraint = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(constraint )
        self.assertTrue(isinstance(dc.token_ids, list ) )

        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )

        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def test_check_illegal_input(self ):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        constraint = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(constraint )  # fails here

    def test_example_progression(self ):
        constraint = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(constraint )

        stepped , completed , reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        stepped , completed , reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped , completed , reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def test_example_progression_unequal_three_mid_and_reset(self ):
        constraint = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(constraint )

        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped , completed , reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )

        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )

        dc.reset()

        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )

        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
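

if __name__ == "__main__":
    # Stand-alone walk-through of the update() protocol the tests above
    # exercise (requires torch, like the tests): feeding tokens advances the
    # trie, and `completed` flips once a full branch has been matched.
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]] )
    for token in (1, 2, 4):
        stepped , completed , reset = dc.update(token )
        print(token , stepped , completed , reset )
    # -> (1, True, False, False), (2, True, False, False), (4, True, True, False)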
| 147 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """simple docstring"""
    parser = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers" )

    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )

    # Let's go
    args = parser.parse_args()

    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )

    # Run
    service = args.func(args )
    service.run()
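

def run_cli_command(argv ):
    """Illustration only (not part of transformers): drive the dispatcher
    above programmatically, e.g. run_cli_command(["env"]) instead of the
    `transformers-cli env` console entry point."""
    import sys

    old_argv = sys.argv
    sys.argv = ["transformers-cli", *argv]
    try:
        main()
    finally:
        sys.argv = old_argv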
if __name__ == "__main__":
main()
| 231 | 0 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class _A :
def __init__( self : List[str] , _A : str , _A : str=100 , _A : Optional[int]=13 , _A : Optional[Any]=30 , _A : Optional[int]=2 , _A : Tuple=3 , _A : Tuple=True , _A : Optional[int]=True , _A : List[Any]=32 , _A : Dict=4 , _A : Tuple=4 , _A : Optional[Any]=37 , _A : str="gelu" , _A : Optional[int]=0.1 , _A : List[str]=0.1 , _A : Any=10 , _A : List[str]=0.02 , _A : str=3 , _A : Union[str, Any]=None , _A : str=[0, 1, 2, 3] , ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[Any] = parent
lowercase : Optional[Any] = 100
lowercase : int = batch_size
lowercase : List[str] = image_size
lowercase : str = patch_size
lowercase : Tuple = num_channels
lowercase : Tuple = is_training
lowercase : Union[str, Any] = use_labels
lowercase : int = hidden_size
lowercase : Any = num_hidden_layers
lowercase : str = num_attention_heads
lowercase : List[str] = intermediate_size
lowercase : Union[str, Any] = hidden_act
lowercase : Optional[int] = hidden_dropout_prob
lowercase : int = attention_probs_dropout_prob
lowercase : List[str] = type_sequence_label_size
lowercase : int = initializer_range
lowercase : List[Any] = scope
lowercase : str = out_indices
lowercase : Optional[Any] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase : List[str] = (image_size // patch_size) ** 2
lowercase : List[str] = num_patches + 1
def __a ( self : int ) -> Dict:
"""simple docstring"""
lowercase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : Dict = None
lowercase : Any = None
if self.use_labels:
lowercase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase : Optional[int] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __a ( self : Optional[Any] , _A : Tuple , _A : Optional[Any] , _A : int , _A : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase : List[str] = BeitModel(config=_A )
model.to(_A )
model.eval()
lowercase : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : str , _A : Optional[Any] , _A : Union[str, Any] , _A : List[str] , _A : Dict ) -> Dict:
"""simple docstring"""
lowercase : str = BeitForMaskedImageModeling(config=_A )
model.to(_A )
model.eval()
lowercase : int = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __a ( self : Any , _A : Dict , _A : Any , _A : Any , _A : List[Any] ) -> str:
"""simple docstring"""
lowercase : List[str] = self.type_sequence_label_size
lowercase : Tuple = BeitForImageClassification(_A )
model.to(_A )
model.eval()
lowercase : Any = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase : List[str] = 1
lowercase : int = BeitForImageClassification(_A )
model.to(_A )
model.eval()
lowercase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase : Union[str, Any] = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Optional[Any] , _A : Tuple , _A : Tuple , _A : List[Any] , _A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase : str = self.num_labels
lowercase : Tuple = BeitForSemanticSegmentation(_A )
model.to(_A )
model.eval()
lowercase : List[Any] = model(_A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
lowercase : Union[str, Any] = model(_A , labels=_A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __a ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowercase : int = self.prepare_config_and_inputs()
lowercase : List[Any] = config_and_inputs
lowercase : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _A ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_UpperCamelCase : str = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Dict = False
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : List[str] = BeitModelTester(self )
lowercase : str = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def __a ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def __a ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
pass
def __a ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : List[str] = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def __a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Union[str, Any] = model_class(_A )
lowercase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : List[str] = [*signature.parameters.keys()]
lowercase : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def __a ( self : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __a ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def __a ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_A )
def __a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if not self.model_tester.is_training:
return
lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Tuple = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_A ), BeitForMaskedImageModeling]:
continue
lowercase : List[Any] = model_class(_A )
model.to(_A )
model.train()
lowercase : Optional[Any] = self._prepare_for_class(_A , _A , return_labels=_A )
lowercase : Any = model(**_A ).loss
loss.backward()
def __a ( self : List[str] ) -> Any:
"""simple docstring"""
lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase : List[Any] = False
lowercase : Optional[int] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_A ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase : List[str] = model_class(_A )
model.gradient_checkpointing_enable()
model.to(_A )
model.train()
lowercase : Dict = self._prepare_for_class(_A , _A , return_labels=_A )
lowercase : Any = model(**_A ).loss
loss.backward()
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Tuple = _config_zero_init(_A )
for model_class in self.all_model_classes:
lowercase : int = model_class(config=_A )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def __a ( self : str ) -> int:
"""simple docstring"""
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Union[str, Any] = BeitModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def snake_case( ) -> int:
'''simple docstring'''
lowercase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def __a ( self : Tuple ) -> str:
"""simple docstring"""
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __a ( self : Tuple ) -> int:
"""simple docstring"""
lowercase : List[str] = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(_A )
lowercase : Any = self.default_image_processor
lowercase : Union[str, Any] = prepare_img()
lowercase : Optional[Any] = image_processor(images=_A , return_tensors='''pt''' ).pixel_values.to(_A )
# prepare bool_masked_pos
lowercase : List[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(_A )
# forward pass
with torch.no_grad():
lowercase : Union[str, Any] = model(pixel_values=_A , bool_masked_pos=_A )
lowercase : List[Any] = outputs.logits
# verify the logits
lowercase : Union[str, Any] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , _A )
lowercase : str = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(_A )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _A , atol=1E-2 ) )
@slow
def __a ( self : Dict ) -> Any:
"""simple docstring"""
lowercase : Any = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(_A )
lowercase : List[Any] = self.default_image_processor
lowercase : List[str] = prepare_img()
lowercase : List[Any] = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
lowercase : List[Any] = model(**_A )
lowercase : Tuple = outputs.logits
# verify the logits
lowercase : List[Any] = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , _A )
lowercase : str = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(_A )
self.assertTrue(torch.allclose(logits[0, :3] , _A , atol=1E-4 ) )
lowercase : List[Any] = 281
self.assertEqual(logits.argmax(-1 ).item() , _A )
@slow
def __a ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase : Optional[Any] = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
_A )
lowercase : str = self.default_image_processor
lowercase : List[str] = prepare_img()
lowercase : Tuple = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
lowercase : List[Any] = model(**_A )
lowercase : Any = outputs.logits
# verify the logits
lowercase : Optional[int] = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , _A )
lowercase : Any = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(_A )
self.assertTrue(torch.allclose(logits[0, :3] , _A , atol=1E-4 ) )
lowercase : Dict = 2_396
self.assertEqual(logits.argmax(-1 ).item() , _A )
@slow
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
lowercase : Union[str, Any] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowercase : Any = model.to(_A )
lowercase : Tuple = BeitImageProcessor(do_resize=_A , size=640 , do_center_crop=_A )
lowercase : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowercase : List[str] = Image.open(ds[0]['''file'''] )
lowercase : Optional[int] = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
lowercase : Any = model(**_A )
lowercase : Tuple = outputs.logits
# verify the logits
lowercase : str = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , _A )
lowercase : Optional[Any] = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
lowercase : int = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=_A , )
else:
lowercase : Dict = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=_A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _A , atol=1E-4 ) )
@slow
def __a ( self : Dict ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowercase : Any = model.to(_A )
lowercase : Optional[Any] = BeitImageProcessor(do_resize=_A , size=640 , do_center_crop=_A )
lowercase : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowercase : Dict = Image.open(ds[0]['''file'''] )
lowercase : List[str] = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
lowercase : Optional[int] = model(**_A )
lowercase : List[Any] = outputs.logits.detach().cpu()
lowercase : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=_A , target_sizes=[(500, 300)] )
lowercase : Union[str, Any] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _A )
lowercase : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=_A )
lowercase : Any = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , _A )
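

if __name__ == "__main__":
    # Minimal end-to-end sketch mirroring the integration test above; the
    # checkpoint predicts ImageNet classes (index 281 is "tabby, tabby cat").
    from transformers import BeitForImageClassification, BeitImageProcessor

    processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    logits = model(**inputs ).logits
    print(model.config.id2label[logits.argmax(-1 ).item()])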
| 371 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4 as uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuida().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent( user_agent = None ) -> str:
    '''simple docstring'''
    ua = F"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += F"""; torch/{_torch_version}"""
    if is_flax_available():
        ua += F"""; jax/{_jax_version}"""
        ua += F"""; flax/{_flax_version}"""
    if is_onnx_available():
        ua += F"""; onnxruntime/{_onnxruntime_version}"""
    # CI will set this value to True
    if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(F"""{k}/{v}""" for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    return ua
def get_full_repo_name( model_id , organization = None , token = None ):
    '''simple docstring'''
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )['''name''']
        return F"""{username}/{model_id}"""
    else:
        return F"""{organization}/{model_id}"""
def create_model_card( args , model_name ):
    '''simple docstring'''
    if not is_jinja_available():
        raise ValueError(
            '''Modelcard rendering is based on Jinja templates.'''
            ''' Please make sure to have `jinja` installed before using `create_model_card`.'''
            ''' To install it, please run `pip install Jinja2`.''' )

    if hasattr(args , '''local_rank''' ) and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args , '''hub_token''' ) else None
    repo_name = get_full_repo_name(model_name , token=hub_token )
    model_card = ModelCard.from_template(
        card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=MODEL_CARD_TEMPLATE_PATH , model_name=model_name , repo_name=repo_name , dataset_name=args.dataset_name if hasattr(args , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , '''gradient_accumulation_steps''' ) else None
        ) , adam_beta1=args.adam_beta1 if hasattr(args , '''adam_beta1''' ) else None , adam_beta2=args.adam_beta2 if hasattr(args , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(args , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(args , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(args , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(args , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(args , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(args , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(args , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )

    card_path = os.path.join(args.output_dir , '''README.md''' )
    model_card.save(card_path )
def extract_commit_hash( resolved_file , commit_hash = None ):
    '''simple docstring'''
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(r'''snapshots/([^/]+)/''' , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache( old_cache_dir = None , new_cache_dir = None ) -> None:
    '''simple docstring'''
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
            'existing cached models. This is a one-time operation, you can interrupt it or run it '
            'later by calling `diffusers.utils.hub_utils.move_cache()`.'
        )
        try:
            move_cache()
        except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'the directory exists and can be written to.'
)
def _add_variant( weights_name , variant = None ) -> str:
    '''simple docstring'''
    if variant is not None:
        splits = weights_name.split('''.''' )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '''.'''.join(splits )

    return weights_name
def _get_model_file( pretrained_model_name_or_path , *,
    weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ):
    '''simple docstring'''
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
        else:
            raise EnvironmentError(
                F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse('''0.20.0''' )
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    F"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , FutureWarning , )
                return model_file
            except: # noqa: E722
                warnings.warn(
                    F"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}' so that the correct variant file can be added.""" , FutureWarning , )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""" )
| 116 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCamelCase ( __lowercase : int ,__lowercase : int ,__lowercase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
A_ : int = tau * frequency / samplerate
A_ : Optional[Any] = sin(__lowercase )
A_ : Dict = cos(__lowercase )
A_ : List[str] = _sin / (2 * q_factor)
A_ : Optional[Any] = (1 - _cos) / 2
A_ : str = 1 - _cos
A_ : Tuple = 1 + alpha
A_ : Union[str, Any] = -2 * _cos
A_ : Tuple = 1 - alpha
A_ : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def UpperCamelCase ( __lowercase : int ,__lowercase : int ,__lowercase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
A_ : Dict = tau * frequency / samplerate
A_ : List[Any] = sin(__lowercase )
A_ : Optional[int] = cos(__lowercase )
A_ : List[Any] = _sin / (2 * q_factor)
A_ : Optional[int] = (1 + _cos) / 2
A_ : Union[str, Any] = -1 - _cos
A_ : int = 1 + alpha
A_ : List[str] = -2 * _cos
A_ : Union[str, Any] = 1 - alpha
A_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def UpperCamelCase ( __lowercase : int ,__lowercase : int ,__lowercase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
A_ : List[str] = tau * frequency / samplerate
A_ : Dict = sin(__lowercase )
A_ : Any = cos(__lowercase )
A_ : int = _sin / (2 * q_factor)
A_ : Union[str, Any] = _sin / 2
A_ : int = 0
A_ : Optional[int] = -ba
A_ : Tuple = 1 + alpha
A_ : str = -2 * _cos
A_ : Optional[int] = 1 - alpha
A_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def UpperCamelCase ( __lowercase : int ,__lowercase : int ,__lowercase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
A_ : Optional[int] = tau * frequency / samplerate
A_ : Union[str, Any] = sin(__lowercase )
A_ : int = cos(__lowercase )
A_ : str = _sin / (2 * q_factor)
A_ : Optional[int] = 1 - alpha
A_ : Tuple = -2 * _cos
A_ : Union[str, Any] = 1 + alpha
A_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] )
return filt
def UpperCamelCase ( __lowercase : int ,__lowercase : int ,__lowercase : float ,__lowercase : float = 1 / sqrt(2 ) ,):
'''simple docstring'''
A_ : str = tau * frequency / samplerate
A_ : Optional[Any] = sin(__lowercase )
A_ : int = cos(__lowercase )
A_ : List[Any] = _sin / (2 * q_factor)
A_ : str = 10 ** (gain_db / 40)
A_ : Dict = 1 + alpha * big_a
A_ : Tuple = -2 * _cos
A_ : Union[str, Any] = 1 - alpha * big_a
A_ : Tuple = 1 + alpha / big_a
A_ : int = -2 * _cos
A_ : List[str] = 1 - alpha / big_a
A_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def UpperCamelCase ( __lowercase : int ,__lowercase : int ,__lowercase : float ,__lowercase : float = 1 / sqrt(2 ) ,):
'''simple docstring'''
A_ : str = tau * frequency / samplerate
A_ : List[Any] = sin(__lowercase )
A_ : Tuple = cos(__lowercase )
A_ : Tuple = _sin / (2 * q_factor)
A_ : Dict = 10 ** (gain_db / 40)
A_ : Optional[Any] = (big_a + 1) - (big_a - 1) * _cos
A_ : Dict = (big_a + 1) + (big_a - 1) * _cos
A_ : int = (big_a - 1) - (big_a + 1) * _cos
A_ : int = (big_a - 1) + (big_a + 1) * _cos
A_ : List[str] = 2 * sqrt(__lowercase ) * alpha
A_ : Union[str, Any] = big_a * (pmc + aaa)
A_ : Any = 2 * big_a * mpc
A_ : int = big_a * (pmc - aaa)
A_ : str = ppmc + aaa
A_ : Any = -2 * pmpc
A_ : int = ppmc - aaa
A_ : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def UpperCamelCase ( __lowercase : int ,__lowercase : int ,__lowercase : float ,__lowercase : float = 1 / sqrt(2 ) ,):
'''simple docstring'''
A_ : int = tau * frequency / samplerate
A_ : List[str] = sin(__lowercase )
A_ : Dict = cos(__lowercase )
A_ : Union[str, Any] = _sin / (2 * q_factor)
A_ : Dict = 10 ** (gain_db / 40)
A_ : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
A_ : Dict = (big_a + 1) + (big_a - 1) * _cos
A_ : Any = (big_a - 1) - (big_a + 1) * _cos
A_ : int = (big_a - 1) + (big_a + 1) * _cos
A_ : Tuple = 2 * sqrt(__lowercase ) * alpha
A_ : Optional[Any] = big_a * (ppmc + aaa)
A_ : Any = -2 * big_a * pmpc
A_ : Dict = big_a * (ppmc - aaa)
A_ : Any = pmc + aaa
A_ : Any = 2 * mpc
A_ : List[Any] = pmc - aaa
A_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
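# NOTE: a minimal standalone sketch (hypothetical name, not used elsewhere in
# this module) of the same RBJ Audio EQ Cookbook low-pass design as the first
# factory above, returning the normalised biquad coefficients directly instead
# of an IIRFilter instance.
def lowpass_coefficients(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    w0 = tau * frequency / samplerate
    alpha = sin(w0) / (2 * q_factor)
    b1 = 1 - cos(w0)
    b0 = b2 = b1 / 2
    a0, a1, a2 = 1 + alpha, -2 * cos(w0), 1 - alpha
    # divide through by a0 so the feedback coefficient list starts at 1.0
    return [b0 / a0, b1 / a0, b2 / a0], [1.0, a1 / a0, a2 / a0]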
| 140 |
from typing import Any
import numpy as np
def UpperCamelCase ( __lowercase : np.ndarray ):
'''simple docstring'''
return np.array_equal(__lowercase ,matrix.conjugate().T )
def UpperCamelCase ( __lowercase : np.ndarray ,__lowercase : np.ndarray ):
'''simple docstring'''
A_ : Union[str, Any] = v.conjugate().T
A_ : Dict = v_star.dot(__lowercase )
assert isinstance(__lowercase ,np.ndarray )
return (v_star_dot.dot(__lowercase )) / (v_star.dot(__lowercase ))
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Dict = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
A_ : List[Any] = np.array([[1], [2], [3]] )
assert is_hermitian(__lowercase ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(__lowercase ,__lowercase ) )
A_ : List[str] = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(__lowercase ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(__lowercase ,__lowercase ) == float(3 )
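# NOTE: a small standalone check (hypothetical name, never called) of the
# classical bound lambda_min <= R(M, v) <= lambda_max for Hermitian M, where
# R(M, v) = (v* M v) / (v* v) is the Rayleigh quotient computed above.
def _rayleigh_bound_demo() -> None:
    m = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    quotient = ((v.conjugate().T @ m @ v) / (v.conjugate().T @ v)).item().real
    eigenvalues = np.linalg.eigvalsh(m)  # ascending for Hermitian input
    assert eigenvalues[0] - 1e-9 <= quotient <= eigenvalues[-1] + 1e-9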
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 140 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowercase__ (A ) ->Optional[Any]:
"""simple docstring"""
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def lowercase__ (A , A ) ->Tuple:
"""simple docstring"""
lowercase__ : int= {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
lowercase__ : Any= key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" )
lowercase__ : Optional[int]= key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" )
lowercase__ : Optional[Any]= key.replace("heads.cmd.itm_head.cls" , "itm_head" )
lowercase__ : List[str]= key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" )
lowercase__ : str= key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" )
lowercase__ : Any= key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" )
lowercase__ : Optional[Any]= key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" )
lowercase__ : Tuple= key.replace("mm_text_projection" , "flava.text_to_mm_projection" )
lowercase__ : Optional[int]= key.replace("mm_image_projection" , "flava.image_to_mm_projection" )
lowercase__ : Optional[int]= key.replace("image_encoder.module" , "flava.image_model" )
lowercase__ : Optional[Any]= key.replace("text_encoder.module" , "flava.text_model" )
lowercase__ : List[str]= key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" )
lowercase__ : str= key.replace("mm_encoder.module" , "flava.multimodal_model" )
lowercase__ : str= key.replace("text_projection" , "flava.text_projection" )
lowercase__ : List[str]= key.replace("image_projection" , "flava.image_projection" )
lowercase__ : List[str]= value.float()
for key, value in codebook_state_dict.items():
lowercase__ : Any= value
return upgrade
@torch.no_grad()
def lowercase__ (A , A , A , A=None ) ->List[Any]:
"""simple docstring"""
if config_path is not None:
lowercase__ : int= FlavaConfig.from_pretrained(__a )
else:
lowercase__ : Any= FlavaConfig()
lowercase__ : Any= FlavaForPreTraining(__a ).eval()
lowercase__ : Union[str, Any]= convert_dalle_checkpoint(__a , __a , save_checkpoint=__a )
if os.path.exists(__a ):
lowercase__ : List[str]= torch.load(__a , map_location="cpu" )
else:
lowercase__ : int= torch.hub.load_state_dict_from_url(__a , map_location="cpu" )
lowercase__ : Dict= upgrade_state_dict(__a , __a )
hf_model.load_state_dict(__a )
lowercase__ : List[str]= hf_model.state_dict()
lowercase__ : Optional[Any]= count_parameters(__a )
lowercase__ : List[str]= count_parameters(__a ) + count_parameters(__a )
assert torch.allclose(__a , __a , atol=1e-3 )
hf_model.save_pretrained(__a )
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
a : Tuple = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 365 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
a : Dict = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
a : List[Any] = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value along with the spearmanr
                               score. If `False`, returns only the spearmanr score.
                               Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
a : int = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
lowercase__ : Optional[int]= spearmanr(snake_case__ , snake_case__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 150 | 0 |
from __future__ import annotations
def __A ( __lowerCamelCase ) -> int:
if not nums:
return 0
a = nums[0]
a = 0
for num in nums[1:]:
a , a = (
max_excluding + num,
max(__lowerCamelCase , __lowerCamelCase ),
)
return max(__lowerCamelCase , __lowerCamelCase )
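# Worked trace of the (max_including, max_excluding) recurrence above for
# nums = [3, 2, 7, 10]:
#   start:   (3, 0)
#   num=2:   (0 + 2, max(3, 0))   -> (2, 3)
#   num=7:   (3 + 7, max(2, 3))   -> (10, 3)
#   num=10:  (3 + 10, max(10, 3)) -> (13, 10)
# answer = max(13, 10) = 13, i.e. picking 3 and 10 (no two adjacent picks).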
if __name__ == "__main__":
import doctest
doctest.testmod()
| 228 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCamelCase : Any = 16
__UpperCamelCase : Union[str, Any] = 32
def __A ( __lowerCamelCase , __lowerCamelCase = 16 ) -> List[str]:
a = AutoTokenizer.from_pretrained("""bert-base-cased""" )
a = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
a = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__lowerCamelCase , max_length=__lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
a = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
a = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
a = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
a = 16
elif accelerator.mixed_precision != "no":
a = 8
else:
a = None
return tokenizer.pad(
__lowerCamelCase , padding="""longest""" , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
a = DataLoader(
tokenized_datasets["""train"""] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
a = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCamelCase : Any = mocked_dataloaders # noqa: F811
def __A ( __lowerCamelCase , __lowerCamelCase ) -> List[str]:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __lowerCamelCase ) == "1":
a = 2
# New Code #
a = int(args.gradient_accumulation_steps )
# Initialize accelerator
a = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__lowerCamelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["""lr"""]
a = int(config["""num_epochs"""] )
a = int(config["""seed"""] )
a = int(config["""batch_size"""] )
a = evaluate.load("""glue""" , """mrpc""" )
set_seed(__lowerCamelCase )
a , a = get_dataloaders(__lowerCamelCase , __lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Instantiate optimizer
a = AdamW(params=model.parameters() , lr=__lowerCamelCase )
# Instantiate scheduler
a = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__lowerCamelCase ):
a = model(**__lowerCamelCase )
a = output.loss
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
a = model(**__lowerCamelCase )
a = outputs.logits.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
a = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , __lowerCamelCase )
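# Note on the accumulation above: the effective batch size becomes
# batch_size * gradient_accumulation_steps * num_processes, and inside
# `accelerator.accumulate(model)` Accelerate wraps the optimizer and scheduler
# so the step()/zero_grad() calls are skipped on every iteration except the
# `gradient_accumulation_steps`-th one.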
def __A ( ) -> int:
a = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__lowerCamelCase , default=__lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__lowerCamelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
a = parser.parse_args()
a = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
| 228 | 1 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCamelCase__ ( _a ):
_lowerCAmelCase = ['''image_processor''', '''tokenizer''']
_lowerCAmelCase = '''BlipImageProcessor'''
_lowerCAmelCase = '''AutoTokenizer'''
def __init__( self : Any , _a : List[Any] , _a : List[Any] , _a : Optional[Any] ):
super().__init__(_a , _a )
# add QFormer tokenizer
a__: Optional[int] =qformer_tokenizer
def __call__( self : Optional[Any] , _a : ImageInput = None , _a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _a : bool = True , _a : Union[bool, str, PaddingStrategy] = False , _a : Union[bool, str, TruncationStrategy] = None , _a : Optional[int] = None , _a : int = 0 , _a : Optional[int] = None , _a : Optional[bool] = None , _a : bool = False , _a : bool = False , _a : bool = False , _a : bool = False , _a : bool = False , _a : bool = True , _a : Optional[Union[str, TensorType]] = None , **_a : str , ):
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
a__: Union[str, Any] =BatchFeature()
if text is not None:
a__: List[Any] =self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
encoding.update(_a )
a__: List[Any] =self.qformer_tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
a__: Any =qformer_text_encoding.pop("input_ids" )
a__: Union[str, Any] =qformer_text_encoding.pop("attention_mask" )
if images is not None:
a__: str =self.image_processor(_a , return_tensors=_a )
encoding.update(_a )
return encoding
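    # The returned BatchFeature carries three aligned streams: `input_ids` /
    # `attention_mask` from the language-model tokenizer, `qformer_input_ids` /
    # `qformer_attention_mask` from the Q-Former tokenizer, and `pixel_values`
    # from the image processor (key names as in the upstream InstructBlipProcessor).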
def _lowerCamelCase ( self : List[str] , *_a : str , **_a : Optional[int] ):
return self.tokenizer.batch_decode(*_a , **_a )
def _lowerCamelCase ( self : int , *_a : int , **_a : Optional[int] ):
return self.tokenizer.decode(*_a , **_a )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _lowerCamelCase ( self : Optional[Any] ):
a__: Optional[Any] =self.tokenizer.model_input_names
a__: Union[str, Any] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _lowerCamelCase ( self : Dict , _a : Optional[Any] , **_a : str ):
if os.path.isfile(_a ):
raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file" )
os.makedirs(_a , exist_ok=_a )
a__: int =os.path.join(_a , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(_a )
return super().save_pretrained(_a , **_a )
@classmethod
def _lowerCamelCase ( cls : str , _a : List[Any] , **_a : int ):
a__: Optional[int] =AutoTokenizer.from_pretrained(_a , subfolder="qformer_tokenizer" )
a__: str =cls._get_arguments_from_pretrained(_a , **_a )
args.append(_a )
return cls(*_a )
| 363 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 42 | 0 |
def lowerCAmelCase_ ( _snake_case : int ) -> list:
'''simple docstring'''
if bit_count < 0:
raise ValueError("The given input must be positive" )
# get the generated string sequence
__magic_name__ : str = gray_code_sequence_string(_snake_case )
#
# convert them to integers
for i in range(len(_snake_case ) ):
__magic_name__ : int = int(sequence[i] , 2 )
return sequence
def lowerCAmelCase_ ( _snake_case : int ) -> list:
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__magic_name__ : Optional[Any] = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__magic_name__ : Optional[int] = gray_code_sequence_string(bit_count - 1 )
__magic_name__ : List[str] = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__magic_name__ : int = "0" + smaller_sequence[i]
sequence.append(_snake_case )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__magic_name__ : Dict = "1" + smaller_sequence[i]
sequence.append(_snake_case )
return sequence
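# NOTE: a standalone cross-check (hypothetical name, never called) using the
# closed form gray(i) = i ^ (i >> 1); it should agree with the recursive
# construction above for any bit_count, e.g. bit_count=2 gives [0, 1, 3, 2].
def _gray_code_closed_form(bit_count: int) -> list:
    return [i ^ (i >> 1) for i in range(1 << bit_count)]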
if __name__ == "__main__":
import doctest
doctest.testmod()
| 281 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class _snake_case :
@staticmethod
def SCREAMING_SNAKE_CASE ( *_a , **_a ):
pass
def lowerCAmelCase_ ( _snake_case : Image ) -> str:
'''simple docstring'''
    __magic_name__ : Optional[int] = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def lowerCAmelCase_ ( _snake_case : Image ) -> Dict:
'''simple docstring'''
__magic_name__ : List[Any] = np.array(_snake_case )
__magic_name__ : Optional[int] = npimg.shape
return {"hash": hashimage(_snake_case ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
UpperCamelCase__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
UpperCamelCase__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
__magic_name__ : Dict = MaskGenerationPipeline(model=_a , image_processor=_a )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def SCREAMING_SNAKE_CASE ( self , _a , _a ):
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
__magic_name__ : str = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
# Shortening by hashing
__magic_name__ : Dict = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : str = "facebook/sam-vit-huge"
__magic_name__ : str = pipeline("mask-generation" , model=_a )
__magic_name__ : Tuple = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__magic_name__ : Any = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
] , )
| 281 | 1 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
def _a ( lowerCamelCase: Optional[Any] ) -> Dict:
'''simple docstring'''
if isinstance(lowerCamelCase , np.ndarray ):
return list(tensor.shape )
__A = tf.shape(lowerCamelCase )
if tensor.shape == tf.TensorShape(lowerCamelCase ):
return dynamic
__A = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(lowerCamelCase )]
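# e.g. for a tensor of static shape (None, 128) the helper above returns
# [<scalar tf.Tensor for the batch dim>, 128]: known dims stay Python ints,
# unknown dims fall back to the matching entry of tf.shape(tensor).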
def _a ( lowerCamelCase: List[str] , lowerCamelCase: List[Any] = None , lowerCamelCase: int = None ) -> Dict:
'''simple docstring'''
return tf.nn.softmax(logits=logits + 1e-9 , axis=lowerCamelCase , name=lowerCamelCase )
def _a ( lowerCamelCase: List[str] , lowerCamelCase: Union[str, Any] , lowerCamelCase: Tuple , lowerCamelCase: Tuple=1e-5 , lowerCamelCase: List[Any]=-1 ) -> Dict:
'''simple docstring'''
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(lowerCamelCase , lowerCamelCase ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
__A , __A = tf.nn.moments(lowerCamelCase , axes=[axis] , keepdims=lowerCamelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
__A = [1] * inputs.shape.rank
__A = shape_list(lowerCamelCase )[axis]
__A = tf.reshape(lowerCamelCase , lowerCamelCase )
__A = tf.reshape(lowerCamelCase , lowerCamelCase )
# Compute layer normalization using the batch_normalization
# function.
__A = tf.nn.batch_normalization(
lowerCamelCase , lowerCamelCase , lowerCamelCase , offset=lowerCamelCase , scale=lowerCamelCase , variance_epsilon=lowerCamelCase , )
return outputs
def _a ( lowerCamelCase: Any , lowerCamelCase: Union[str, Any]=0 , lowerCamelCase: str=-1 ) -> Any:
'''simple docstring'''
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
__A = tf.shape(lowerCamelCase )
__A = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
__A = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(lowerCamelCase , lowerCamelCase )
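# e.g. flattening a (2, 3, 4) input with start_dim=1 yields shape (2, 12);
# the defaults (start_dim=0, end_dim=-1) collapse everything into (24,).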
def _a ( lowerCamelCase: Tuple ) -> Any:
'''simple docstring'''
if not isinstance(lowerCamelCase , tf.Tensor ):
__A = tf.convert_to_tensor(lowerCamelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
__A = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
__A = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
__A = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def _a ( lowerCamelCase: Optional[int] , lowerCamelCase: Optional[int] , lowerCamelCase: str = "input_ids" ) -> Optional[Any]:
'''simple docstring'''
tf.debugging.assert_less(
lowerCamelCase , tf.cast(lowerCamelCase , dtype=tensor.dtype ) , message=(
F"""The maximum value of {tensor_name} ({tf.math.reduce_max(lowerCamelCase )}) must be smaller than the embedding """
F"""layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def _a ( lowerCamelCase: Union[str, Any] , lowerCamelCase: Optional[Any] , lowerCamelCase: Optional[Any] ) -> Tuple:
'''simple docstring'''
__A = 6_45_12
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
__A = [x for x in data if len(lowerCamelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
F"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
F"""bytes: {bad_attributes}""" )
__A = np.asarray(lowerCamelCase )
__A = 1
__A = np.array_split(lowerCamelCase , lowerCamelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
__A = np.array_split(lowerCamelCase , lowerCamelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(lowerCamelCase ):
__A = chunk_data
else:
__A = data
def _a ( lowerCamelCase: Tuple , lowerCamelCase: Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if name in group.attrs:
__A = [n.decode('''utf8''' ) if hasattr(lowerCamelCase , '''decode''' ) else n for n in group.attrs[name]]
else:
__A = []
__A = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(lowerCamelCase , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def _a ( lowerCamelCase: Optional[int] ) -> Dict:
'''simple docstring'''
def _expand_single_ad_tensor(lowerCamelCase: List[Any] ):
if isinstance(lowerCamelCase , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(lowerCamelCase , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , lowerCamelCase )
| 355 |
from __future__ import annotations
snake_case__ : Dict = [True] * 1000001
snake_case__ : int = 2
while i * i <= 1000000:
if seive[i]:
for j in range(i * i, 1000001, i):
snake_case__ : str = False
i += 1
def _a ( lowerCamelCase: int ) -> bool:
'''simple docstring'''
return seive[n]
def _a ( lowerCamelCase: int ) -> bool:
'''simple docstring'''
return any(digit in '''02468''' for digit in str(lowerCamelCase ) )
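# Any number with an even digit has a rotation ending in that digit, making
# that rotation even and therefore composite, so such candidates can be
# skipped outright (2 itself is seeded into the result list below).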
def _a ( lowerCamelCase: int = 1_00_00_00 ) -> list[int]:
'''simple docstring'''
__A = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(lowerCamelCase ) and not contains_an_even_digit(lowerCamelCase ):
__A = str(lowerCamelCase )
__A = [int(str_num[j:] + str_num[:j] ) for j in range(len(lowerCamelCase ) )]
if all(is_prime(lowerCamelCase ) for i in list_nums ):
result.append(lowerCamelCase )
return result
def _a ( ) -> int:
'''simple docstring'''
return len(find_circular_primes() )
if __name__ == "__main__":
print(f'{len(find_circular_primes()) = }')
| 250 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ["torch", "transformers", "onnx"]
def __init__( self : str ,*_a : List[Any] ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(self ,['torch', 'transformers', 'onnx'] )
@classmethod
def __lowercase ( cls : List[str] ,*_a : List[str] ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,['torch', 'transformers', 'onnx'] )
@classmethod
def __lowercase ( cls : Optional[int] ,*_a : Optional[Any] ,**_a : str ):
'''simple docstring'''
requires_backends(cls ,['torch', 'transformers', 'onnx'] )
class UpperCAmelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__UpperCAmelCase : Dict = ["torch", "transformers", "onnx"]
def __init__( self : List[Any] ,*_a : Optional[int] ,**_a : int ):
'''simple docstring'''
requires_backends(self ,['torch', 'transformers', 'onnx'] )
@classmethod
def __lowercase ( cls : Any ,*_a : Dict ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,['torch', 'transformers', 'onnx'] )
@classmethod
def __lowercase ( cls : Any ,*_a : Any ,**_a : Dict ):
'''simple docstring'''
requires_backends(cls ,['torch', 'transformers', 'onnx'] )
class UpperCAmelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__UpperCAmelCase : Any = ["torch", "transformers", "onnx"]
def __init__( self : List[Any] ,*_a : List[Any] ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(self ,['torch', 'transformers', 'onnx'] )
@classmethod
def __lowercase ( cls : Dict ,*_a : List[Any] ,**_a : Tuple ):
'''simple docstring'''
requires_backends(cls ,['torch', 'transformers', 'onnx'] )
@classmethod
def __lowercase ( cls : int ,*_a : List[Any] ,**_a : Optional[Any] ):
'''simple docstring'''
requires_backends(cls ,['torch', 'transformers', 'onnx'] )
class UpperCAmelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = ["torch", "transformers", "onnx"]
def __init__( self : str ,*_a : Optional[int] ,**_a : Optional[Any] ):
'''simple docstring'''
requires_backends(self ,['torch', 'transformers', 'onnx'] )
@classmethod
def __lowercase ( cls : Dict ,*_a : Tuple ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,['torch', 'transformers', 'onnx'] )
@classmethod
def __lowercase ( cls : Optional[Any] ,*_a : Dict ,**_a : Tuple ):
'''simple docstring'''
requires_backends(cls ,['torch', 'transformers', 'onnx'] )
class UpperCAmelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = ["torch", "transformers", "onnx"]
def __init__( self : Union[str, Any] ,*_a : Any ,**_a : Dict ):
'''simple docstring'''
requires_backends(self ,['torch', 'transformers', 'onnx'] )
@classmethod
def __lowercase ( cls : Dict ,*_a : List[str] ,**_a : List[str] ):
'''simple docstring'''
requires_backends(cls ,['torch', 'transformers', 'onnx'] )
@classmethod
def __lowercase ( cls : Any ,*_a : Optional[Any] ,**_a : Tuple ):
'''simple docstring'''
requires_backends(cls ,['torch', 'transformers', 'onnx'] )
class UpperCAmelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = ["torch", "transformers", "onnx"]
def __init__( self : int ,*_a : Optional[int] ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(self ,['torch', 'transformers', 'onnx'] )
@classmethod
def __lowercase ( cls : Tuple ,*_a : int ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,['torch', 'transformers', 'onnx'] )
@classmethod
def __lowercase ( cls : Any ,*_a : int ,**_a : Optional[int] ):
'''simple docstring'''
requires_backends(cls ,['torch', 'transformers', 'onnx'] )
| 271 |
def __UpperCamelCase ( _lowerCAmelCase = 100_0000 ) -> int:
"""simple docstring"""
A : str = limit + 1
A : Tuple = [0] * limit
for first_term in range(1 , _lowerCAmelCase ):
for n in range(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
A : Any = first_term + n / first_term
            if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1 # so z > 0 needs a > d, and n > 0 needs a < 4 * d
A : Optional[int] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
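# Derivation behind the loops above: writing the progression as x = a + d,
# y = a, z = a - d gives x^2 - y^2 - z^2 = 4*a*d - a^2 = a * (4*d - a) = n,
# so for each a (`first_term`) only multiples n of a can work, with
# d = (a + n / a) / 4; d must be an integer, and z > 0, n > 0 force
# d < a < 4*d -- exactly the divisibility and interval checks made above.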
if __name__ == "__main__":
print(F"""{solution() = }""")
| 116 | 0 |
"""simple docstring"""
import numpy as np
from PIL import Image
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> np.ndarray:
A__ = np.array(lowercase_ )
if arr.shape[0] != arr.shape[1]:
raise ValueError("The input array is not a square matrix" )
A__ = 0
A__ = 0
A__ = 0
A__ = 0
# compute the shape of the output matrix
A__ = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
A__ = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
A__ = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
A__ = 0
A__ = 0
return updated_arr
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> np.ndarray:
A__ = np.array(lowercase_ )
if arr.shape[0] != arr.shape[1]:
raise ValueError("The input array is not a square matrix" )
A__ = 0
A__ = 0
A__ = 0
A__ = 0
# compute the shape of the output matrix
A__ = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
A__ = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
A__ = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
A__ = 0
A__ = 0
return updated_arr
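# A tiny hand-checked example (independent of the image pipeline below) on a
# 4x4 matrix with size=2, stride=2:
#   [[ 1,  2,  3,  4],    maxpooling -> [[ 6.,  8.],    avgpooling -> [[ 3.,  5.],
#    [ 5,  6,  7,  8],                   [14., 16.]]                   [11., 13.]]
#    [ 9, 10, 11, 12],
#    [13, 14, 15, 16]]
# (avgpooling truncates: int(np.average(...)) turns 3.5 into 3, 5.5 into 5, ...)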
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
SCREAMING_SNAKE_CASE = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 230 |
"""simple docstring"""
SCREAMING_SNAKE_CASE = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
SCREAMING_SNAKE_CASE = [{"type": "code", "content": INSTALL_CONTENT}]
SCREAMING_SNAKE_CASE = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 230 | 1 |
def lowerCamelCase__ ( A__ : Optional[Any]=28123 ):
'''simple docstring'''
__lowerCamelCase = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
__lowerCamelCase = set()
__lowerCamelCase = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(A__ )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
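# Sketch of the logic above: `sum_divs` is a sieve of proper-divisor sums, a
# number n is abundant when sum_divs[n] > n, and n is added to the answer only
# when no abundant a exists with n - a abundant as well. The default limit of
# 28123 is the classical bound above which every integer is a sum of two
# abundant numbers.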
if __name__ == "__main__":
print(solution())
| 12 |
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
SCREAMING_SNAKE_CASE__ = 2_048
SCREAMING_SNAKE_CASE__ = 4_096
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = os.environ.pop("PROCESS_TRAIN", "false")
SCREAMING_SNAKE_CASE__ = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def lowerCAmelCase__ ( _UpperCamelCase : str ) -> Any:
"""simple docstring"""
def choose_first(_UpperCamelCase : Tuple , _UpperCamelCase : List[Any]=False ):
assert isinstance(_UpperCamelCase , _UpperCamelCase )
if len(_UpperCamelCase ) == 1:
snake_case = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
snake_case = {k: [a[k]] for k in a}
if len(a['start_token'] ) > 0:
break
return a
snake_case = {'id': example['id']}
snake_case = example['annotations']
snake_case = annotation['yes_no_answer']
if 0 in yes_no_answer or 1 in yes_no_answer:
snake_case = ['yes'] if 1 in yes_no_answer else ['no']
snake_case = snake_case = []
snake_case = snake_case = []
snake_case = ['<cls>']
else:
snake_case = ['short']
snake_case = choose_first(annotation['short_answers'] )
if len(out['start_token'] ) == 0:
# answer will be long if short is not available
snake_case = ['long']
snake_case = choose_first(annotation['long_answer'] , is_long_answer=_UpperCamelCase )
snake_case = []
answer.update(_UpperCamelCase )
# disregard some samples
if len(answer['start_token'] ) > 1 or answer["start_token"] == answer["end_token"]:
snake_case = True
else:
snake_case = False
snake_case = ['start_token', 'end_token', 'start_byte', 'end_byte', 'text']
if not all(isinstance(answer[k] , _UpperCamelCase ) for k in cols ):
raise ValueError('Issue in ID' , example['id'] )
return answer
def lowerCAmelCase__ ( _UpperCamelCase : Dict , _UpperCamelCase : List[str]=False ) -> Union[str, Any]:
"""simple docstring"""
snake_case = _get_single_answer(_UpperCamelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
snake_case = example['document']['tokens']
snake_case = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
return {
"context": " ".join(_UpperCamelCase ),
"answer": {
"start_token": -1_0_0, # ignore index in cross-entropy
"end_token": -1_0_0, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
snake_case = ['start_token', 'end_token']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
snake_case = example['document']['tokens']
snake_case = answer['start_token']
snake_case = answer['end_token']
snake_case = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
snake_case = ' '.join(context[start_token:end_token] )
# checking above code
if assertion:
snake_case = doc['is_html'][answer['start_token'] : answer['end_token']]
snake_case = doc['token'][answer['start_token'] : answer['end_token']]
snake_case = ' '.join([old[i] for i in range(len(_UpperCamelCase ) ) if not is_html[i]] )
if new != old:
print('ID:' , example['id'] )
print('New:' , _UpperCamelCase , end='\n' )
print('Old:' , _UpperCamelCase , end='\n\n' )
return {
"context": " ".join(_UpperCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def lowerCAmelCase__ ( _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : Optional[int]=2_0_4_8 , _UpperCamelCase : Union[str, Any]=4_0_9_6 , _UpperCamelCase : Dict=True ) -> Optional[Any]:
"""simple docstring"""
snake_case = get_context_and_ans(_UpperCamelCase , assertion=_UpperCamelCase )
snake_case = out['answer']
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
snake_case = tokenizer(example['question']['text'] , out['context'] ).input_ids
snake_case = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
snake_case = []
snake_case = []
snake_case = input_ids[:q_len]
snake_case = range(_UpperCamelCase , len(_UpperCamelCase ) , max_length - doc_stride )
for i in doc_start_indices:
snake_case = i + max_length - q_len
snake_case = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['category'][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_0_0] * len(_UpperCamelCase ),
"end_token": [-1_0_0] * len(_UpperCamelCase ),
"category": category,
},
}
snake_case = out['context'].split()
snake_case = splitted_context[answer['end_token']]
snake_case = len(
tokenizer(
' '.join(splitted_context[: answer['start_token']] ) , add_special_tokens=_UpperCamelCase , ).input_ids )
snake_case = len(
tokenizer(' '.join(splitted_context[: answer['end_token']] ) , add_special_tokens=_UpperCamelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
snake_case = len(tokenizer(_UpperCamelCase , add_special_tokens=_UpperCamelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
snake_case = input_ids[answer['start_token'] : answer['end_token'] + 1] # right & left are inclusive
snake_case = answer['start_token']
snake_case = answer['end_token']
if assertion:
snake_case = tokenizer.decode(_UpperCamelCase )
if answer["span"] != new:
print('ISSUE IN TOKENIZATION' )
print('OLD:' , answer['span'] )
print('NEW:' , _UpperCamelCase , end='\n\n' )
if len(_UpperCamelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
snake_case = input_ids[:q_len]
snake_case = range(_UpperCamelCase , len(_UpperCamelCase ) , max_length - doc_stride )
snake_case = []
snake_case = []
snake_case = []
snake_case = [] # null, yes, no, long, short
for i in doc_start_indices:
snake_case = i + max_length - q_len
snake_case = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
snake_case = start_token - i + q_len
snake_case = end_token - i + q_len
answers_category.append(answer['category'][0] ) # ["short"] -> "short"
else:
snake_case = -1_0_0
snake_case = -1_0_0
answers_category.append('null' )
snake_case = inputs[-1][start_token : end_token + 1]
answers_start_token.append(_UpperCamelCase )
answers_end_token.append(_UpperCamelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('ISSUE in strided for ID:' , example['id'] )
print('New:' , tokenizer.decode(_UpperCamelCase ) )
print('Old:' , tokenizer.decode(_UpperCamelCase ) , end='\n\n' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
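# Window layout sketch for the strided branch above: `doc_start_indices` walks
# the tokenised context in steps of (max_length - doc_stride); every window
# keeps the question prefix `q_indices` plus the next chunk of context tokens,
# and it receives real start/end labels only when the gold span falls entirely
# inside it -- otherwise the labels are -100 / "null" so the loss ignores it.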
def lowerCAmelCase__ ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int]=2_0_4_8 , _UpperCamelCase : Union[str, Any]=4_0_9_6 , _UpperCamelCase : List[str]=False ) -> Union[str, Any]:
"""simple docstring"""
snake_case = get_strided_contexts_and_ans(
_UpperCamelCase , _UpperCamelCase , doc_stride=_UpperCamelCase , max_length=_UpperCamelCase , assertion=_UpperCamelCase , )
return example
def lowerCAmelCase__ ( _UpperCamelCase : Tuple , _UpperCamelCase : List[str] ) -> Any:
"""simple docstring"""
with jsonlines.open(_UpperCamelCase , 'a' ) as writer:
for example in tqdm(_UpperCamelCase , total=len(_UpperCamelCase ) , desc='Saving samples ... ' ):
snake_case = example['labels']
for ids, start, end, cat in zip(
example['input_ids'] , labels['start_token'] , labels['end_token'] , labels['category'] , ):
if start == -1 and end == -1:
                    continue # skip samples that have no answer
if cat == "null" and np.random.rand() < 0.6:
                    continue # drop roughly 60 % of the "null" samples
writer.write(
{
'input_ids': ids,
'start_token': start,
'end_token': end,
'category': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
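    # Sketch of the windowing arithmetic above (illustrative numbers, not from
    # this script): each chunk holds q_len question tokens plus
    # max_length - q_len context tokens, and chunk starts are spaced
    # max_length - doc_stride apart, so consecutive chunks overlap by
    # doc_stride - q_len tokens. E.g. with max_length=4096, doc_stride=2048
    # and a 16-token question, chunks start at 16, 2064, 4112, ... and each
    # adjacent pair shares 2032 context tokens.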
| 150 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
| 227 |
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]
    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)
    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted
    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)

        return self.to_int(self.modulus(inv_key))
    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
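# Usage sketch (illustrative, not part of the original module): a 2x2 key such
# as numpy.array([[2, 5], [1, 6]]) has determinant 7, which is coprime with
# 36, so HillCipher accepts it, and for any such key
# HillCipher(key).decrypt(HillCipher(key).encrypt(text)) recovers the
# processed text (uppercased, filtered to A-Z0-9, and padded with its last
# character up to a multiple of the key order).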
| 227 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
'''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary, inlining the backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 83 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1,
        )
        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None, )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None, )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels, )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape
    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)
    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)
    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()
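        # Note (added for clarity): z + (z_q - z).detach() evaluates to z_q in
        # the forward pass, but its gradient flows entirely to z -- the
        # standard straight-through estimator for training through the
        # non-differentiable argmin above.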
        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry(self, indices, shape):
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None):
        # make sure sample is on the same device and has the same dtype as the parameters
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
| 42 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 353 |
"""simple docstring"""
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)
    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that no queen in
        # the current board (possible_board) already occupies this column,
        # because that would be a vertical collision. Then we apply the two
        # formulas we learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already
        # exist in their respective variables (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks is True there is a collision, so we continue to
        # the next value in the for loop.
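        # Worked example (illustrative): with a queen already at row 1, col 3,
        # the 45º diagonal stores 1 - 3 = -2 and the 135º diagonal stores
        # 1 + 3 = 4; a candidate at row 2, col 0 gives 2 - 0 = 2 and
        # 2 + 0 = 2, so it collides with neither diagonal (nor the column).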
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If there is no collision we call depth_first_search again, passing the
        # updated board state
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 268 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser
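# Assuming the usual `accelerate` console-script wiring (not shown in this
# file), the parser above exposes `accelerate config` plus the `default` and
# `update` subcommands, e.g. `accelerate config update --config_file path.yaml`.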
def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 87 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 250 | 0 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
@require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 115 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_mobilenet_v2""": [
"""MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileNetV2Config""",
"""MobileNetV2OnnxConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
"""MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileNetV2ForImageClassification""",
"""MobileNetV2ForSemanticSegmentation""",
"""MobileNetV2Model""",
"""MobileNetV2PreTrainedModel""",
"""load_tf_weights_in_mobilenet_v2""",
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 115 | 1 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )
    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]

        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 230 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000) -> bool:
    """Miller-Rabin probabilistic primality test with `prec` random witnesses."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1

    # n - 1 = d * (2**exp)
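    # e.g. for n = 13: n - 1 = 12 = 3 * (2**2), so d = 3 and exp = 2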
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break

                b = b * b
                b %= n

            if flag:
                return False

        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 230 | 1 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Loads examples into a list of `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
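                    # Worked example (illustrative, not from the original file):
                    # "jacksonville" labeled B-LOC tokenizing to
                    # ["jack", "##son", "##ville"] contributes label_ids
                    # [label_map["B-LOC"], pad_token_label_id, pad_token_label_id],
                    # so only the first sub-token carries the word's label.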
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
snake_case_ = [sequence_a_segment_id] * len(_UpperCAmelCase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
snake_case_ = [cls_token] + tokens
snake_case_ = [pad_token_label_id] + label_ids
snake_case_ = [cls_token_segment_id] + segment_ids
snake_case_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
snake_case_ = [1 if mask_padding_with_zero else 0] * len(_UpperCAmelCase )
# Zero-pad up to the sequence length.
snake_case_ = max_seq_length - len(_UpperCAmelCase )
if pad_on_left:
snake_case_ = ([pad_token] * padding_length) + input_ids
snake_case_ = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
snake_case_ = ([pad_token_segment_id] * padding_length) + segment_ids
snake_case_ = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_UpperCAmelCase ) == max_seq_length
assert len(_UpperCAmelCase ) == max_seq_length
assert len(_UpperCAmelCase ) == max_seq_length
assert len(_UpperCAmelCase ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(_UpperCAmelCase ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(_UpperCAmelCase ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(_UpperCAmelCase ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(_UpperCAmelCase ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(_UpperCAmelCase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
snake_case_ = None
features.append(
InputFeatures(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , label_ids=_UpperCAmelCase ) )
return features
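    # Illustrative example (not from the original file): with the word "jacksonville"
    # tokenized into ["jack", "##son", "##ville"] and the word-level label "B-LOC",
    # the emitted token-level label ids are
    #   [label_map["B-LOC"], pad_token_label_id, pad_token_label_id]
    # so the loss is later computed only on the first sub-token of each word.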
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        # Use the cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            token_classification_task,
            data_dir,
            tokenizer,
            labels,
            model_type,
            max_seq_length=None,
            overwrite_cache=False,
            mode=Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]


if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        # Use the cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = -100

        def __init__(
            self,
            token_classification_task,
            data_dir,
            tokenizer,
            labels,
            model_type,
            max_seq_length=None,
            overwrite_cache=False,
            mode=Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]
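# A minimal usage sketch (the task subclass, paths and labels below are
# illustrative assumptions, not part of the original file):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   train_dataset = TokenClassificationDataset(
#       token_classification_task=NER(),  # hypothetical TokenClassificationTask subclass
#       data_dir="./data",
#       tokenizer=tokenizer,
#       labels=["O", "B-LOC", "I-LOC"],
#       model_type="bert",
#       max_seq_length=128,
#   )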
| 267 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
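# The (old, new) pairs above map LAVIS parameter names onto the transformers
# BLIP-2 layout; keys not covered here are handled by the substring replacements
# in the conversion function further below.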
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
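# Note (added for clarity): the original checkpoints store only q and v biases
# for the fused qkv projection; the k bias is implicitly zero, which is why a
# zero tensor of the same shape is concatenated between them above.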
def get_blip2_config(model_name, eos_token_id=None):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
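# Example invocation (illustrative; the script filename is an assumption):
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b --pytorch_dump_folder_path ./blip2-opt-2.7b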
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 267 | 1 |
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether ``number`` is prime using 6k +/- 1 trial division."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
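# Worked example (not part of the original file): for number = 97 the loop above
# runs a single iteration with i = 5, testing the divisors 5 and i + 2 = 7 (the
# next candidate, 11, already exceeds sqrt(97) ~ 9.85); neither divides 97, so
# 97 is reported prime.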
| 227 |
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers with the bead ("gravity") sort algorithm."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # let the excess "beads" fall from the upper rod to the lower one
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
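# Illustrative trace (not part of the original file): on [5, 4, 3, 2, 1] the
# first comparison of the first pass moves one "bead" from index 0 to index 1,
# giving [4, 5, 3, 2, 1]; repeated passes let all excess beads fall until the
# list is sorted ascending.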
| 227 | 1 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator that makes a function return its wall-clock duration instead of its result."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
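# A minimal usage sketch (the feature spec and path are assumptions, not part of
# the original file):
#
#   features = datasets.Features(
#       {"text": datasets.Value("string"), "label": datasets.Value("int64")}
#   )
#   dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=100)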
| 353 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batched, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # map pixel coordinates to [-1, 1] then scale by the field of view
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view, assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
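# A minimal usage sketch (the size value is an assumption): create_pan_cameras
# builds 20 cameras orbiting the origin, and the `camera_rays` property yields
# one (origin, direction) pair per pixel:
#
#   cameras = create_pan_cameras(size=64)
#   rays = cameras.camera_rays  # shape [1, 20 * 64 * 64, 2, 3]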
| 240 | 0 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : int = JsonDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_json_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE_ : List[Any] = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : int = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Dict = JsonDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_json_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Any = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
SCREAMING_SNAKE_CASE_ : Dict = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : List[Any] = JsonDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _snake_case ( lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = features.copy()
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[int] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = JsonDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE_ : Optional[int] = JsonDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_json_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : str ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Tuple = jsonl_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[str] = [jsonl_path]
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = JsonDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_json_dataset(lowerCAmelCase , lowerCAmelCase )
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Any , lowerCAmelCase : int , lowerCAmelCase : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = JsonDatasetReader({"train": jsonl_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_json_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE_ : Optional[int] = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Tuple = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Dict = JsonDatasetReader({"train": jsonl_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_json_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Any = {split: jsonl_path}
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "train"
SCREAMING_SNAKE_CASE_ : List[Any] = {"train": jsonl_path, "test": jsonl_path}
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE_ : List[str] = JsonDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_json_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class a__ :
@pytest.mark.parametrize("lines, load_json_function",[(True, load_json_lines), (False, load_json)] )
def __UpperCamelCase ( self : Optional[Any],_A : Optional[Any],_A : str,_A : Union[str, Any] ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(_A,_A,lines=_A ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE_ : Dict = load_json_function(_A )
assert isinstance(_A,_A )
assert isinstance(exported_content[0],_A )
assert len(_A ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at",[
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
],)
def __UpperCamelCase ( self : str,_A : Any,_A : List[str],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(_A,_A,lines=_A,orient=_A ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE_ : str = load_json(_A )
assert isinstance(_A,_A )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_A,"keys" ) and not hasattr(exported_content[0],"keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_A ) == 10
@pytest.mark.parametrize("lines, load_json_function",[(True, load_json_lines), (False, load_json)] )
def __UpperCamelCase ( self : Tuple,_A : int,_A : Any,_A : Optional[int] ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(_A,_A,lines=_A,num_proc=2 ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = load_json_function(_A )
assert isinstance(_A,_A )
assert isinstance(exported_content[0],_A )
assert len(_A ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at",[
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
],)
def __UpperCamelCase ( self : Union[str, Any],_A : Dict,_A : Optional[int],_A : Optional[int],_A : Dict,_A : int ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(_A,_A,lines=_A,orient=_A,num_proc=2 ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE_ : Any = load_json(_A )
assert isinstance(_A,_A )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_A,"keys" ) and not hasattr(exported_content[0],"keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_A ) == 10
def __UpperCamelCase ( self : str,_A : List[str] ):
"""simple docstring"""
with pytest.raises(_A ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_A,_A,num_proc=0 )
@pytest.mark.parametrize("compression, extension",[("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def __UpperCamelCase ( self : Optional[int],_A : str,_A : Union[str, Any],_A : Optional[Any],_A : List[Any],_A : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = tmp_path_factory.mktemp("data" ) / F'test.json.{extension}'
SCREAMING_SNAKE_CASE_ : Any = str(shared_datadir / F'test_file.json.{extension}' )
JsonDatasetWriter(_A,_A,compression=_A ).write()
with fsspec.open(_A,"rb",compression="infer" ) as f:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = f.read()
with fsspec.open(_A,"rb",compression="infer" ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] = f.read()
assert exported_content == original_content
| 18 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
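# Shape sketch (illustrative): pixel_values [B, 3, H, W] -> CLIP pooled output
# [B, hidden_size] -> unsqueezed to [B, 1, hidden_size] for the mapper blocks ->
# final projection to [B, 1, proj_size].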
| 268 | 0 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP, DataParallel, DeepSpeed, torch.compile)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            # unwrap mixed-precision decorators until the original forward is found
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set upper-cased environment variables inside the context."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge ``source`` into ``destination`` and return it."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
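# A minimal usage sketch of patch_environment (values are illustrative):
#
#   with patch_environment(master_addr="127.0.0.1", master_port="29501"):
#       ...  # code in this block sees MASTER_ADDR / MASTER_PORT
#   # on exit, the patched variables are removed from os.environ again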
| 103 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=19, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=5, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=37, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=512, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=None, ) -> Optional[int]:
UpperCamelCase : Any = parent
UpperCamelCase : Optional[Any] = batch_size
UpperCamelCase : Optional[Any] = seq_length
UpperCamelCase : Tuple = is_training
UpperCamelCase : List[str] = use_input_mask
UpperCamelCase : Optional[int] = use_token_type_ids
UpperCamelCase : Any = use_labels
UpperCamelCase : Union[str, Any] = vocab_size
UpperCamelCase : int = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : Dict = num_attention_heads
UpperCamelCase : List[Any] = intermediate_size
UpperCamelCase : str = hidden_act
UpperCamelCase : List[Any] = hidden_dropout_prob
UpperCamelCase : List[str] = attention_probs_dropout_prob
UpperCamelCase : List[str] = max_position_embeddings
UpperCamelCase : Union[str, Any] = type_vocab_size
UpperCamelCase : str = type_sequence_label_size
UpperCamelCase : List[str] = initializer_range
UpperCamelCase : List[Any] = num_labels
UpperCamelCase : Any = num_choices
UpperCamelCase : Tuple = scope
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCamelCase : Optional[Any] = None
if self.use_input_mask:
UpperCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : Optional[int] = None
UpperCamelCase : Optional[int] = None
if self.use_labels:
UpperCamelCase : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
UpperCamelCase : Tuple = ids_tensor([self.batch_size], self.num_choices )
UpperCamelCase : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : List[str] = EsmConfig(
vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=SCREAMING_SNAKE_CASE_, esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False}, )
return config
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase : Union[str, Any] = EsmForProteinFolding(config=SCREAMING_SNAKE_CASE_ ).float()
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : int = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Optional[int] = (EsmForProteinFolding,) if is_torch_available() else ()
UpperCAmelCase__ : int = ()
UpperCAmelCase__ : List[str] = {} if is_torch_available() else {}
UpperCAmelCase__ : Optional[int] = False
def snake_case_ ( self ) -> Dict:
UpperCamelCase : Tuple = EsmFoldModelTester(self )
UpperCamelCase : List[str] = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, hidden_size=37 )
def snake_case_ ( self ) -> int:
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
@unittest.skip('Does not support attention outputs' )
def snake_case_ ( self ) -> Tuple:
pass
@unittest.skip
def snake_case_ ( self ) -> List[Any]:
pass
@unittest.skip('Esm does not support embedding resizing' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('Esm does not support embedding resizing' )
def snake_case_ ( self ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def snake_case_ ( self ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> int:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def snake_case_ ( self ) -> str:
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.' )
def snake_case_ ( self ) -> List[str]:
pass
@unittest.skip('ESMFold only has one output format.' )
def snake_case_ ( self ) -> int:
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('ESMFold does not support input chunking.' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def snake_case_ ( self ) -> Tuple:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def snake_case_ ( self ) -> str:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def snake_case_ ( self ) -> List[Any]:
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def snake_case_ ( self ) -> List[str]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self ) -> Optional[Any]:
pass
@require_torch
class lowerCAmelCase_ ( a__ ):
@slow
def snake_case_ ( self ) -> str:
UpperCamelCase : Union[str, Any] = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
model.eval()
UpperCamelCase : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_ )['positions']
UpperCamelCase : int = torch.tensor([2.58_28, 0.79_93, -10.93_34], dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
| 103 | 1 |