| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-53.2k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """Simple constructor that receives a key or uses the default key = 0."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt ``content`` with ``key``, one character per list entry."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Decrypt a list of characters produced by ``encrypt`` (XOR is symmetric)."""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 64 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence`` in place between indices ``start`` and ``end`` (both inclusive)."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
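    # A tiny in-place usage sketch; slowsort is a deliberately pessimal
    # "multiply and surrender" sort, far slower than O(n^2).
    data = [5, 3, 8, 1]
    slowsort(data)
    print(data)  # [1, 3, 5, 8]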
| 101 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Model output that also carries the projected (transformed) hidden states."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            # project the second-to-last hidden state after a LayerNorm
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)

        return TransformationModelOutput(
            projection_state=projection_state,
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
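# Smoke-test sketch for the classes above (tiny sizes for speed; the
# de-obfuscated class and field names are inferred from the code itself):
if __name__ == "__main__":
    cfg = RobertaSeriesConfig(
        vocab_size=100, hidden_size=32, num_hidden_layers=2,
        num_attention_heads=2, intermediate_size=37, project_dim=8,
    )
    model = RobertaSeriesModelWithTransformation(cfg)
    out = model(input_ids=torch.tensor([[0, 5, 2]]))
    print(out.projection_state.shape)  # torch.Size([1, 3, 8])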
| 218 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        # construct a default scheduler only when one is not passed in
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
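# Standalone sketch of the scheduler under test, built with the same config
# values the tests use (relies only on the diffusers import above):
if __name__ == "__main__":
    sched = DEISMultistepScheduler(
        num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02,
        beta_schedule="linear", solver_order=2,
    )
    sched.set_timesteps(25)
    print(sched.timesteps[:3])  # the first (largest) denoising timesteps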
| 218 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class ImageGPTImageProcessor(BaseImageProcessor):
    """Constructs an ImageGPT image processor: optionally resizes and normalizes
    images, then colour-quantizes each pixel to its nearest configured cluster."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        # rescale to [0, 2], then shift to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_normalize: Optional[bool] = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
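# Tiny numeric check of the nearest-cluster quantization above (toy palette;
# the values are illustrative, not the real ImageGPT clusters):
if __name__ == "__main__":
    pixels = np.array([[255.0, 0.0, 0.0], [10.0, 200.0, 30.0]])  # two RGB pixels
    palette = np.array([[250.0, 5.0, 5.0], [0.0, 255.0, 0.0], [0.0, 0.0, 255.0]])
    print(color_quantize(pixels, palette))  # [0 1]: index of the nearest palette colour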
| 38 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
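# Illustrative sketch of a test consuming the fixtures above; the assertion
# mirrors the f-string repo naming used in the session fixtures:
def test_private_txt_repo_id(hf_private_dataset_repo_txt_data):
    assert hf_private_dataset_repo_txt_data.startswith(f"{CI_HUB_USER}/repo_txt_data-")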
| 519 | 0 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(
            generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
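# Quick unconditional-generation sketch using the same tiny randomly
# initialized UNet shape as the fast test above (outputs are noise; this only
# exercises the shapes, and is an assumption-laden example, not a real test):
if __name__ == "__main__":
    torch.manual_seed(0)
    unet = UNet2DModel(
        block_out_channels=(32, 64), layers_per_block=2, sample_size=32,
        in_channels=3, out_channels=3,
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
    img = pipe(num_inference_steps=20, output_type="numpy").images
    print(img.shape)  # (1, 32, 32, 3)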
| 519 |
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the
    1000-digit number ``n``."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
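    # Equivalent one-liner using math.prod, as an alternative sketch (not the
    # original solution):
    from math import prod

    assert solution() == max(prod(int(d) for d in N[i : i + 13]) for i in range(len(N) - 12))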
| 519 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import ASTForAudioClassification, ASTModel
    from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_torchaudio_available():
    import torchaudio

    from transformers import ASTFeatureExtractor


class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on some audio from AudioSet
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
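# Patch-count arithmetic for the default tester values above, as a quick check:
if __name__ == "__main__":
    frequency_out = (16 - 2) // 2 + 1  # (num_mel_bins - patch_size) // frequency_stride + 1 = 8
    time_out = (24 - 2) // 2 + 1       # (max_length - patch_size) // time_stride + 1 = 12
    print(frequency_out * time_out + 2)  # 96 patches + [CLS] + distillation token = 98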
| 460 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
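# Programmatic invocation sketch, equivalent to running `accelerate env` from
# a shell (which is how this parser is normally reached):
#   args = env_command_parser().parse_args([])
#   env_command(args)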
| 135 | 0 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 344 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.encodec')
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    # membership test: a bare `or "encodec_32khz"` here would always be truthy
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)

                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None

                set_recursively(hf_model, mapped_key, value, name, weight_type)

        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
UpperCAmelCase = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
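# A hypothetical invocation of the conversion entry point above; the script
# filename and all paths are placeholders, not real files, while the flags
# come directly from the argparse definitions:
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th \
#       --pytorch_dump_folder_path ./encodec-24khz-hf \
#       --push_to_hub username/encodec_24khz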
| 344 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
snake_case_ : List[str] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : Optional[Any] = "sshleifer/tiny-gpt2"
snake_case_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : int = "sgugger/tiny-distilbert-classification"
snake_case_ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , only_pretrain_model=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : List[Any] = "sshleifer/tiny-gpt2"
snake_case_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Any = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : Optional[Any] = "sshleifer/tiny-gpt2"
snake_case_ : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
snake_case_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Any:
snake_case_ : Union[str, Any] = "sshleifer/tiny-gpt2"
snake_case_ : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Optional[int] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
snake_case_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> int:
snake_case_ : Tuple = "sshleifer/tiny-gpt2"
snake_case_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Union[str, Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ) -> int:
snake_case_ : Optional[Any] = "sshleifer/tiny-gpt2"
snake_case_ : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
snake_case_ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : int = "patrickvonplaten/t5-tiny-random"
snake_case_ : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
snake_case_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def _lowerCAmelCase ( self ) -> Any:
snake_case_ : Dict = "sshleifer/tiny-gpt2"
snake_case_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Optional[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
snake_case_ : int = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , save_to_csv=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , "inf_mem.csv" ) , env_info_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , "env.csv" ) , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Union[str, Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
benchmark.run()
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "env.csv" ) ).exists() )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : Dict = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(_SCREAMING_SNAKE_CASE ):
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "sequential" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "cumulative" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "current" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_SCREAMING_SNAKE_CASE , "log.txt" ) , log_print=_SCREAMING_SNAKE_CASE , trace_memory_line_by_line=_SCREAMING_SNAKE_CASE , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "log.txt" ) ).exists() )
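# A minimal sketch of running one of the benchmarks above outside the unittest
# harness; the model id and sizes mirror the tests, and multi_process=False
# keeps everything in a single process for easier debugging:
#
#   args = TensorFlowBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], inference=True, training=False,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = TensorFlowBenchmark(args).run()
#   print(results.time_inference_result)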
| 568 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
lowercase : int = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def lowerCAmelCase__ ( _a : Dict , _a : Optional[int]=None , _a : Union[str, Any]=None , _a : List[Any]=None ):
snake_case_ : Optional[Any] = True
while ask_again:
snake_case_ : str = input(_a )
try:
if default is not None and len(_a ) == 0:
return default
return convert_value(_a ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_a )
def lowerCAmelCase__ ( _a : Dict , _a : str=[] , _a : Union[str, Any]=None , _a : Optional[int]=0 ):
snake_case_ : List[Any] = BulletMenu(_a , _a )
snake_case_ : Union[str, Any] = menu.run(default_choice=_a )
return convert_value(_a ) if convert_value is not None else result
def lowerCAmelCase__ ( _a : List[str] ):
snake_case_ : str = int(_a )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def lowerCAmelCase__ ( _a : int ):
snake_case_ : str = int(_a )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def lowerCAmelCase__ ( _a : Optional[Any] ):
snake_case_ : int = int(_a )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def lowerCAmelCase__ ( _a : Dict ):
snake_case_ : str = int(_a )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def lowerCAmelCase__ ( _a : int ):
snake_case_ : Optional[int] = int(_a )
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def lowerCAmelCase__ ( _a : str ):
return {"yes": True, "no": False}[value.lower()]
class UpperCAmelCase_ ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ : Tuple = super()._format_usage(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Any = usage.replace("<command> [<args>] " , "" )
return usage
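# Illustrative prompt flow built from the helpers above; `_ask_field` and
# `_convert_yes_no_to_bool` are assumed original names for the first prompt
# function and the yes/no converter, inferred from their bodies:
#
#   use_fp16 = _ask_field(
#       "Do you wish to use FP16 (mixed) precision? [yes/NO]: ",
#       _convert_yes_no_to_bool,
#       default=False,
#       error_message="Please enter yes or no.",
#   )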
| 568 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase :Any = {
"""configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""],
"""tokenization_lxmert""": ["""LxmertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :str = ["""LxmertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[Any] = [
"""LxmertEncoder""",
"""LxmertForPreTraining""",
"""LxmertForQuestionAnswering""",
"""LxmertModel""",
"""LxmertPreTrainedModel""",
"""LxmertVisualFeatureEncoder""",
"""LxmertXLayer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :List[Any] = [
"""TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLxmertForPreTraining""",
"""TFLxmertMainLayer""",
"""TFLxmertModel""",
"""TFLxmertPreTrainedModel""",
"""TFLxmertVisualFeatureEncoder""",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
_lowerCAmelCase :Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
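# With the lazy structure above in place, an import such as
#
#   from transformers.models.lxmert import LxmertConfig, LxmertModel
#
# resolves `configuration_lxmert` and `modeling_lxmert` on first access
# instead of importing every optional backend at package-import time.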
| 701 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=99 , lowercase__=64 , lowercase__=5 , lowercase__=4 , lowercase__=64 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=16 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ) -> int:
SCREAMING_SNAKE_CASE : Optional[int] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : Any = use_input_mask
SCREAMING_SNAKE_CASE : Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Dict = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def _UpperCamelCase ( self ) -> Union[str, Any]:
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : str = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self ) -> Tuple:
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[str]:
SCREAMING_SNAKE_CASE : List[str] = MPNetModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE : Optional[int] = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : str = MPNetForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(
lowercase__ , attention_mask=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = MPNetForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_choices
SCREAMING_SNAKE_CASE : Any = MPNetForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(
lowercase__ , attention_mask=lowercase__ , labels=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[str]:
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = MPNetForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) : str = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case__ : Optional[int] = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[int] = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : List[str] = False
snake_case__ : int = True
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : int = MPNetModelTester(self )
SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=lowercase__ , hidden_size=37 )
def _UpperCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*lowercase__ )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase__ )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase__ )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase__ )
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase__ )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : Tuple = MPNetModel.from_pretrained('microsoft/mpnet-base' )
SCREAMING_SNAKE_CASE : str = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowercase__ )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase__ )
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1E-4 ) )
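# The slow integration check above as a standalone snippet; the model id and
# the expected output shape are taken directly from the test:
#
#   import torch
#   from transformers import MPNetModel
#
#   model = MPNetModel.from_pretrained("microsoft/mpnet-base")
#   ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
#   assert model(ids)[0].shape == torch.Size((1, 11, 768))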
| 179 | 0 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( _a , unittest.TestCase ):
_a = XGLMTokenizer
_a = XGLMTokenizerFast
_a = True
_a = True
def __lowercase ( self : Dict ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self : int ):
lowerCAmelCase = """<pad>"""
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase )
def __lowercase ( self : Any ):
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(lowerCAmelCase ) , 1008 )
def __lowercase ( self : Union[str, Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __lowercase ( self : str ):
lowerCAmelCase = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase )
lowerCAmelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase )
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase )
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __lowercase ( self : int ):
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def __lowercase ( self : Tuple ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase , f.name )
lowerCAmelCase = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase )
lowerCAmelCase = pickle.dumps(lowerCAmelCase )
pickle.loads(lowerCAmelCase )
def __lowercase ( self : List[str] ):
if not self.test_rust_tokenizer:
return
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = """I was born in 92000, and this is falsé."""
lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase )
lowerCAmelCase = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
lowerCAmelCase = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
lowerCAmelCase = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(lowerCAmelCase )
lowerCAmelCase = rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
@slow
def __lowercase ( self : List[str] ):
lowerCAmelCase = """Hello World!"""
lowerCAmelCase = [2, 3_1227, 4447, 35]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase ) )
@slow
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
lowerCAmelCase = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase ) )
@slow
def __lowercase ( self : Union[str, Any] ):
# fmt: off
lowerCAmelCase = {
"""input_ids""": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name="""facebook/xglm-564M""" , padding=lowerCAmelCase , )
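# Quick standalone check mirroring the first slow test above (this downloads
# the real facebook/xglm-564M tokenizer, so network access is required):
#
#   from transformers import XGLMTokenizer
#
#   tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   assert tok.encode("Hello World!") == [2, 31227, 4447, 35]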
| 169 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = 'efficientformer'
def __init__( self : Any , lowerCAmelCase : List[int] = [3, 2, 6, 4] , lowerCAmelCase : List[int] = [48, 96, 224, 448] , lowerCAmelCase : List[bool] = [True, True, True, True] , lowerCAmelCase : int = 448 , lowerCAmelCase : int = 32 , lowerCAmelCase : int = 4 , lowerCAmelCase : int = 7 , lowerCAmelCase : int = 5 , lowerCAmelCase : int = 8 , lowerCAmelCase : int = 4 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : int = 16 , lowerCAmelCase : int = 3 , lowerCAmelCase : int = 3 , lowerCAmelCase : int = 3 , lowerCAmelCase : int = 2 , lowerCAmelCase : int = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : int = 1 , lowerCAmelCase : bool = True , lowerCAmelCase : bool = True , lowerCAmelCase : float = 1e-5 , lowerCAmelCase : str = "gelu" , lowerCAmelCase : float = 0.02 , lowerCAmelCase : float = 1e-12 , lowerCAmelCase : int = 224 , lowerCAmelCase : float = 1e-05 , **lowerCAmelCase : int , ):
super().__init__(**lowerCAmelCase )
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = hidden_sizes
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = depths
lowerCAmelCase = mlp_expansion_ratio
lowerCAmelCase = downsamples
lowerCAmelCase = dim
lowerCAmelCase = key_dim
lowerCAmelCase = attention_ratio
lowerCAmelCase = resolution
lowerCAmelCase = pool_size
lowerCAmelCase = downsample_patch_size
lowerCAmelCase = downsample_stride
lowerCAmelCase = downsample_pad
lowerCAmelCase = drop_path_rate
lowerCAmelCase = num_metaad_blocks
lowerCAmelCase = distillation
lowerCAmelCase = use_layer_scale
lowerCAmelCase = layer_scale_init_value
lowerCAmelCase = image_size
lowerCAmelCase = batch_norm_eps
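# Every constructor argument above has a default, so a configuration can be
# built with no arguments at all. `EfficientFormerConfig` is the canonical
# class name assumed for the placeholder class above; the expected values are
# the signature defaults:
#
#   config = EfficientFormerConfig()
#   assert config.hidden_sizes == [48, 96, 224, 448]
#   assert config.depths == [3, 2, 6, 4]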
| 169 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__A : List[Any] = get_tests_dir('fixtures')
__A : List[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
__A : int = get_tests_dir('fixtures/dummy-config.json')
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
def _snake_case ( self )-> Optional[int]:
lowerCamelCase_ =0
def _snake_case ( self )-> List[str]:
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def _snake_case ( self )-> str:
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def _snake_case ( self )-> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ =WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(UpperCamelCase__ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
lowerCamelCase_ =WavaVecaFeatureExtractor(**UpperCamelCase__ )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
config.save_pretrained(UpperCamelCase__ )
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(UpperCamelCase__ )
# make sure private variable is not incorrectly saved
lowerCamelCase_ =json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def _snake_case ( self )-> List[Any]:
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def _snake_case ( self )-> Any:
with self.assertRaisesRegex(
UpperCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained("""bert-base""" )
def _snake_case ( self )-> Optional[int]:
with self.assertRaisesRegex(
UpperCamelCase__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(UpperCamelCase__ , revision="""aaaaaa""" )
def _snake_case ( self )-> int:
with self.assertRaisesRegex(
UpperCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _snake_case ( self )-> int:
with self.assertRaises(UpperCamelCase__ ):
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase__ )
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(UpperCamelCase__ )
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def _snake_case ( self )-> List[str]:
try:
AutoConfig.register("""custom""" , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase_ =CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(UpperCamelCase__ )
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self )-> Any:
class _SCREAMING_SNAKE_CASE ( lowercase_):
_UpperCamelCase:Optional[Any] = True
try:
AutoConfig.register("""custom""" , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(UpperCamelCase__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
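# The registration pattern exercised above, in miniature; CustomConfig and
# CustomFeatureExtractor are the test fixtures imported at the top of the
# file, and the save directory is a placeholder:
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   fe = CustomFeatureExtractor()                 # defaults are enough here
#   fe.save_pretrained("./my-custom-fe")
#   AutoFeatureExtractor.from_pretrained("./my-custom-fe")  # -> CustomFeatureExtractor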
| 710 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Dict = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:Optional[Any] = "yolos"
def __init__( self , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=[512, 864] , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=100 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.1 , **_SCREAMING_SNAKE_CASE , )-> Tuple:
super().__init__(**_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_act
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =initializer_range
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =image_size
lowerCamelCase_ =patch_size
lowerCamelCase_ =num_channels
lowerCamelCase_ =qkv_bias
lowerCamelCase_ =num_detection_tokens
lowerCamelCase_ =use_mid_position_embeddings
lowerCamelCase_ =auxiliary_loss
# Hungarian matcher
lowerCamelCase_ =class_cost
lowerCamelCase_ =bbox_cost
lowerCamelCase_ =giou_cost
# Loss coefficients
lowerCamelCase_ =bbox_loss_coefficient
lowerCamelCase_ =giou_loss_coefficient
lowerCamelCase_ =eos_coefficient
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:Optional[Any] = version.parse("1.11")
@property
def _snake_case ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self )-> float:
return 1E-4
@property
def _snake_case ( self )-> int:
return 12
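# The three ONNX properties above fix the export contract; roughly as follows,
# where the class name and the de-obfuscated property names are assumptions
# based on the return types:
#
#   onnx_config = YolosOnnxConfig(config)
#   onnx_config.inputs
#   # {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}
#   onnx_config.atol_for_validation  # 1e-4, tolerance when validating the export
#   onnx_config.default_onnx_opset   # 12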
| 75 | 0 |
def jaro_winkler(stra: str, strb: str) -> float:
    """Jaro-Winkler similarity between two strings, in the range [0, 1]."""

    def get_matched_characters(_stra: str, _strb: str) -> str:
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot match twice
                _strb = f"{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transpositions: matched characters appearing in a different order
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix of up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
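# Worked example for the call above: between "hello" and "world" only "l" is
# matched inside the search window, so match_count = 1 and transpositions = 0,
# giving
#   jaro = (1/5 + 1/5 + 1/1) / 3 ≈ 0.4667
# There is no common prefix, so the Winkler bonus is zero and the printed
# score is roughly 0.4667.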
| 101 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 101 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_A = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def lowerCamelCase__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : str=None ):
"""simple docstring"""
lowerCAmelCase_ = XLNetConfig.from_json_file(__lowerCAmelCase )
lowerCAmelCase_ = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
lowerCAmelCase_ = finetuning_task
lowerCAmelCase_ = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowerCAmelCase_ = XLNetForSequenceClassification(__lowerCAmelCase )
elif "squad" in finetuning_task:
lowerCAmelCase_ = finetuning_task
lowerCAmelCase_ = XLNetForQuestionAnswering(__lowerCAmelCase )
else:
lowerCAmelCase_ = XLNetLMHeadModel(__lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Save pytorch-model
lowerCAmelCase_ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase_ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
print(F"""Save PyTorch model to {os.path.abspath(__lowerCAmelCase )}""" )
torch.save(model.state_dict() , __lowerCAmelCase )
print(F"""Save configuration file to {os.path.abspath(__lowerCAmelCase )}""" )
with open(__lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_A = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
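# Hypothetical invocation of the script above; the checkpoint and config
# paths are placeholders for a downloaded pre-trained XLNet release, while
# the flags come from the argparse definitions:
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-base-cased-pt \
#       --finetuning_task sst-2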
| 713 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]  # drop the avgpool and fc head
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    """Pad the tokenized sentences in ``batch`` to a common length and stack the tensors."""
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    """Return the list of MM-IMDB genre labels."""
    return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    """ImageNet-style resize/crop/normalize pipeline for the image encoder above."""
    return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
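# Hypothetical end-to-end assembly of the pieces above; the jsonl path and
# the tokenizer are placeholders:
#
#   from torch.utils.data import DataLoader
#
#   labels = get_mmimdb_labels()
#   dataset = JsonlDataset("data/train.jsonl", tokenizer,
#                          get_image_transforms(), labels, max_seq_length=512)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)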
| 279 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
'''configuration_albert''': ['''ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AlbertConfig''', '''AlbertOnnxConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''AlbertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''AlbertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
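# With the lazy module installed in sys.modules, `import transformers.models.albert`
# stays cheap: heavy submodules such as modeling_albert are only imported the first
# time one of their exported names (e.g. AlbertModel) is accessed.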
| 40 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
# TODO Update this
_lowercase = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig( PretrainedConfig ):
    model_type = "esm"
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1026 , initializer_range=0.02 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
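        # ESMFold-specific configuration: the fields below are only populated when
        # this config describes a folding model.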
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''' )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , False ):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
    def to_dict( self ) -> dict:
        """Serialize this instance to a dictionary, including the nested folding config."""
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output['''esmfold_config'''] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__( self ):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict( self ) -> dict:
        output = asdict(self )
        output['''trunk'''] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__( self ):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                f' {self.sequence_state_dim} and {self.sequence_head_width}.' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                f' {self.pairwise_state_dim} and {self.pairwise_head_width}.' )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'''
                f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'''
                f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
        if self.dropout >= 0.4:
            raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.' )
    def to_dict( self ) -> dict:
        output = asdict(self )
        output['''structure_module'''] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict( self ) -> dict:
        return asdict(self )
def get_default_vocab_list() -> tuple:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
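# Example (sketch; the hyperparameter values are illustrative, not a released checkpoint):
# folding_config = EsmConfig(vocab_size=33, is_folding_model=True, esmfold_config=EsmFoldConfig())
# assert folding_config.esmfold_config.trunk.structure_module is not None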
| 443 | 0 |
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class AutoformerConfig( PretrainedConfig ):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__( self , prediction_length: Optional[int] = None , context_length: Optional[int] = None , distribution_output: str = "student_t" , loss: str = "nll" , input_size: int = 1 , lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7] , scaling: bool = True , num_time_features: int = 0 , num_dynamic_real_features: int = 0 , num_static_categorical_features: int = 0 , num_static_real_features: int = 0 , cardinality: Optional[List[int]] = None , embedding_dimension: Optional[List[int]] = None , d_model: int = 64 , encoder_attention_heads: int = 2 , decoder_attention_heads: int = 2 , encoder_layers: int = 2 , decoder_layers: int = 2 , encoder_ffn_dim: int = 32 , decoder_ffn_dim: int = 32 , activation_function: str = "gelu" , dropout: float = 0.1 , encoder_layerdrop: float = 0.1 , decoder_layerdrop: float = 0.1 , attention_dropout: float = 0.1 , activation_dropout: float = 0.1 , num_parallel_samples: int = 100 , init_std: float = 0.02 , use_cache: bool = True , is_encoder_decoder=True , label_length: int = 10 , moving_average: int = 25 , autocorrelation_factor: int = 3 , **kwargs , ):
        '''Configuration for the Autoformer time-series forecasting model.'''
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    """The cardinality should be a list of the same length as `num_static_categorical_features`""" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def _number_of_features( self ) -> int:
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
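# Example (sketch; the prediction length is illustrative): with the defaults above,
# `AutoformerConfig(prediction_length=24)` derives `feature_size` from the input
# size, the lag features and the `_number_of_features` property.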
| 194 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VisualQuestionAnsweringPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        '''Pipeline for open-ended question answering about a single image.'''
        super().__init__(*args , **kwargs )
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
    def _sanitize_parameters( self , top_k=None , padding=None , truncation=None , **kwargs ):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["""padding"""] = padding
        if truncation is not None:
            preprocess_params["""truncation"""] = truncation
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , image: Union["Image.Image", str] , question: str = None , **kwargs ):
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {"""image""": image, """question""": question}
        else:
            # A list of {"image": ..., "question": ...} dicts was passed directly.
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def preprocess( self , inputs , padding=False , truncation=False ):
        image = load_image(inputs["""image"""] )
        model_inputs = self.tokenizer(
            inputs["""question"""] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k )
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 194 | 1 |
from __future__ import annotations
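# Boyer-Moore substring search with the bad-character heuristic: on a mismatch the
# pattern is shifted so the mismatching text character lines up with its rightmost
# occurrence in the pattern, skipping alignments that cannot possibly match.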
class BoyerMooreSearch:
    def __init__( self , text: str , pattern: str ):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text ), len(pattern )
    def match_in_pattern( self , char: str ) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text( self , current_pos: int ) -> int:
        """Return the text index of the last mismatch for the window at `current_pos`, or -1 on a full match."""
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic( self ) -> list[int]:
        """Return every position at which the pattern occurs in the text."""
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = '''ABAABA'''
pattern = '''AB'''
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
| 360 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline(Pipeline ):
    def __init__( self , **kwargs ):
        '''Pipeline that scores an image against arbitrary candidate labels with an image-text model.'''
        super().__init__(**kwargs )
        requires_backends(self , 'vision' )
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , images: Union[str, List[str], "Image", List["Image"]] , **kwargs ):
        """Classify the image(s) against the `candidate_labels` keyword argument."""
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ):
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        # NOTE: padding=True is an assumption; the hypothesis sentences must be
        # padded so that they can be batched together.
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['text_inputs'] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        candidate_labels = model_inputs.pop('candidate_labels' )
        text_inputs = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        candidate_labels = model_outputs.pop('candidate_labels' )
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''' )
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
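# Usage sketch (checkpoint name is illustrative; weights are fetched on first use):
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# classifier("cat.png", candidate_labels=["cat", "dog"], hypothesis_template="a photo of a {}")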
| 360 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
SCREAMING_SNAKE_CASE__ = """▁"""
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Dict = VOCAB_FILES_NAMES
lowerCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ : Optional[Any] = ['input_ids', 'attention_mask']
lowerCAmelCase__ : int = BarthezTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ) -> None:
        """Build a fast BARThez tokenizer backed by a sentencepiece BPE model."""
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """Add the BARThez special tokens: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """BARThez does not use token type ids, so an all-zero mask is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
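# Usage sketch (downloads tokenizer files from the Hub on first call):
# tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
# tokenizer("Transformers est génial !")["input_ids"]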
| 701 |
from pathlib import Path
import numpy as np
from PIL import Image
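# Morphological dilation on a binary image: an output pixel is set whenever the
# structuring element, centred on it, overlaps at least one foreground pixel,
# which grows shapes and fills small holes.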
def rgb_to_gray(rgb: np.ndarray ) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray ) -> np.ndarray:
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray , kernel: np.ndarray ) -> np.ndarray:
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image into the padded image; the slice below assumes an odd-sized
    # kernel (e.g. the 3x3 one used in the demo at the bottom of this file).
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("""RGB""")
    pil_img.save("""result_dilation.png""")
| 688 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
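# The tests below exercise cross-framework weight conversion: `from_pt=True` loads a
# PyTorch checkpoint into a TF model, and `from_tf=True` performs the reverse.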
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    """Cross-framework tests: load PT checkpoints into TF models and vice versa."""
@slow
    def test_model_from_pretrained( self ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModel.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertModel )
            model = AutoModel.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertModel )
@slow
    def test_model_for_pretraining_from_pretrained( self ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForPreTraining.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForPreTraining )
            model = AutoModelForPreTraining.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForPreTraining )
@slow
    def test_model_for_causal_lm( self ):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , GPT2Config )
            model = TFAutoModelForCausalLM.from_pretrained(model_name , from_pt=True )
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFGPT2LMHeadModel )
            model = AutoModelForCausalLM.from_pretrained(model_name , from_tf=True )
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , GPT2LMHeadModel )
@slow
    def test_lmhead_model_from_pretrained( self ):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelWithLMHead.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
            model = AutoModelWithLMHead.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForMaskedLM )
@slow
    def test_model_for_masked_lm( self ):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForMaskedLM.from_pretrained(model_name , from_pt=True )
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
            model = AutoModelForMaskedLM.from_pretrained(model_name , from_tf=True )
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForMaskedLM )
@slow
    def test_model_for_encoder_decoder_lm( self ):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , T5Config )
            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name , from_pt=True )
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFT5ForConditionalGeneration )
            model = AutoModelForSeq2SeqLM.from_pretrained(model_name , from_tf=True )
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , T5ForConditionalGeneration )
@slow
    def test_sequence_classification_model_from_pretrained( self ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForSequenceClassification )
            model = AutoModelForSequenceClassification.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForSequenceClassification )
@slow
    def test_question_answering_model_from_pretrained( self ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForQuestionAnswering )
            model = AutoModelForQuestionAnswering.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForQuestionAnswering )
    def test_from_pretrained_identifier( self ):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , BertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
    def test_from_identifier_from_model_type( self ):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
| 682 |
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin , UNetTesterMixin , unittest.TestCase):
    """Unit tests for the AutoencoderKL VAE model."""
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input( self ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}
    @property
    def input_shape( self ):
        return (3, 32, 32)
    @property
    def output_shape( self ):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature( self ):
        pass
    def test_training( self ):
        pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
    def test_gradient_checkpointing( self ):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        model.to(torch_device )
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out )
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict )
        # clone model
        model_2.load_state_dict(model.state_dict() )
        model_2.to(torch_device )
        model_2.enable_gradient_checkpointing()
        assert model_2.is_gradient_checkpointing and model_2.training
        out_2 = model_2(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5 )
        named_params = dict(model.named_parameters() )
        named_params_2 = dict(model_2.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_2[name].grad.data , atol=5e-5 ) )
    def test_from_pretrained_hub( self ):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained( self ):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
        model = model.to(torch_device )
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0 )
        else:
            generator = torch.Generator(device=torch_device ).manual_seed(0 )
        image = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image , sample_posterior=True , generator=generator ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ] )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    """Slow integration tests that compare VAE outputs against recorded slices."""
    def get_file_format( self , seed , shape ):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy"
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_sd_image( self , seed=0 , shape=(4, 3, 512, 512) , fp16=False ):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image
    def get_sd_vae_model( self , model_id="CompVis/stable-diffusion-v1-4" , fp16=False ):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id , subfolder="vae" , torch_dtype=torch_dtype , revision=revision , )
        model.to(torch_device ).eval()
        return model
    def get_generator( self , seed=0 ):
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[4_7, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
    def test_stable_diffusion( self , seed , expected_slice , expected_slice_mps ):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[4_7, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16( self , seed , expected_slice ):
        model = self.get_sd_vae_model(fp16=True )
        image = self.get_sd_image(seed , fp16=True )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[4_7, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
    def test_stable_diffusion_mode( self , seed , expected_slice , expected_slice_mps ):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        with torch.no_grad():
            sample = model(image ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[1_3, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
[3_7, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode( self , seed , expected_slice ):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[2_7, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
[1_6, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16( self , seed , expected_slice ):
        model = self.get_sd_vae_model(fp16=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fp16=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=5e-3 )
@parameterized.expand([(1_3,), (1_6,), (2_7,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16( self , seed ):
        model = self.get_sd_vae_model(fp16=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fp16=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample , sample_2 , atol=1e-1 )
@parameterized.expand([(1_3,), (1_6,), (3_7,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0( self , seed ):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample , sample_2 , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
[4_7, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample( self , seed , expected_slice ):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            dist = model.encode(image ).latent_dist
            sample = dist.sample(generator=generator )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice , expected_output_slice , atol=tolerance )
| 682 | 1 |
"""simple docstring"""
def excel_title_to_column(column_title: str ) -> int:
    """
    Given an uppercase Excel column title (e.g. "A", "Z", "AB"), return its
    corresponding column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("B")
    2
    >>> excel_title_to_column("AB")
    28
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 701 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class DPRConfig( PretrainedConfig ):
    model_type = '''dpr'''
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim: int = 0 , **kwargs , ):
        '''Configuration shared by the DPR context encoder, question encoder and reader.'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
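# Usage sketch: the same configuration class is shared by DPRContextEncoder,
# DPRQuestionEncoder and DPRReader; a non-zero `projection_dim` adds a projection
# layer on top of the encoder outputs.
# config = DPRConfig(projection_dim=128)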
| 395 | 0 |
"""simple docstring"""
def solution(max_perimeter: int = 10**9 ) -> int:
    """
    Sum the perimeters of all qualifying triangles whose perimeter does not
    exceed `max_perimeter`, generated by the recurrence below.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 690 |
"""simple docstring"""
from __future__ import annotations
def longest_subsequence(array: list[int] ) -> list[int]:  # This function is recursive
    """
    Return the longest non-decreasing subsequence of `array`.

    >>> longest_subsequence([1, 2, 3])
    [1, 2, 3]
    >>> longest_subsequence([3, 1, 2])
    [1, 2]
    """
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig( PretrainedConfig ):
    model_type = 'deformable_detr'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , max_position_embeddings=1024 , encoder_layers=6 , encoder_ffn_dim=1024 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=300 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , disable_custom_kernels=False , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.' )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
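# A minimal usage sketch (added; not part of the original file). It assumes the
# surrounding class is transformers' DeformableDetrConfig; that class name and
# the import path are assumptions, since the class statement sits above this
# excerpt.
#
#   from transformers import DeformableDetrConfig
#
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   assert config.to_dict()["two_stage"] is True
#   assert config.hidden_size == config.d_model == 256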
| 707 |
"""simple docstring"""
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        # Make all edge weights distinct so the cheapest-edge choice in
        # Boruvka's algorithm is unambiguous: drop the reversed duplicate of
        # each undirected edge, sort by weight, then bump any weight that
        # ties its predecessor.
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class UnionFind:
    def __init__(self):
        self.parent = {}
        self.rank = {}

    def __len__(self):
        return len(self.parent)

    def make_set(self, item):
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            # Path compression.
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union(self, item1, item2):
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        # Equal ranks: pick root1 as the new root and bump its rank.
        self.rank[root1] += 1
        self.parent[root2] = root1
        return root1


def boruvka_mst(graph):
    num_components = graph.num_vertices
    union_find = UnionFind()
    mst_edges = []
    while num_components > 1:
        cheap_edge = {}
        for vertex in graph.get_vertices():
            cheap_edge[vertex] = -1

        edges = graph.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for edge in edges:
            head, tail, weight = edge
            set1 = union_find.find(head)
            set2 = union_find.find(tail)
            if set1 != set2:
                if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                    cheap_edge[set1] = [head, tail, weight]
                if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                    cheap_edge[set2] = [head, tail, weight]
        for vertex in cheap_edge:
            if cheap_edge[vertex] != -1:
                head, tail, weight = cheap_edge[vertex]
                if union_find.find(head) != union_find.find(tail):
                    union_find.union(head, tail)
                    mst_edges.append(cheap_edge[vertex])
                    num_components = num_components - 1
    mst = Graph.build(edges=mst_edges)
    return mst
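
if __name__ == "__main__":
    # Usage sketch (added for illustration; not in the original file). The
    # vertex labels and edge weights below are arbitrary examples.
    g = Graph.build(
        vertices=[1, 2, 3, 4],
        edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 3)],
    )
    g.distinct_weight()
    print(boruvka_mst(g))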
| 549 | 0 |
'''simple docstring'''
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert in descending order at the head so the list reads ascending.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 597 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
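# Example invocation (added; the paths are illustrative, not from the original
# script):
#
#   python convert_mbart_checkpoint.py /path/to/model.pt /path/to/output_dir \
#       --hf_config facebook/mbart-large-cc25 --finetuned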
| 597 | 1 |
"""simple docstring"""
def apply_table(inp, table):
    """Apply a permutation table to the bit-string ``inp`` (1-indexed table)."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate the bit-string left by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    # The outer bits select the row, the inner bits select the column.
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES (reads the module-level p4_table)."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption: apply the round keys in reverse order
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
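    # Round-trip property (added note; not in the original file): because the
    # decryption branch applies key2 before key1, the two Feistel rounds undo
    # each other, so for any valid 10-bit key and 8-bit message, PT == message.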
| 706 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1_289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase,  # expected_encoding dict defined above
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
| 42 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)

        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }

        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)

        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)

        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get a different unet than `dummy_super_res_first`
        torch.manual_seed(1)

        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
| 510 |
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """A generic min-heap; pass ``key`` to change the ordering (e.g. build a max-heap)."""

    def __init__(self, key=None):
        # Stores heap items as [item, score] pairs.
        self.arr = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i):
        """Returns the parent index of the given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i):
        """Returns the left-child index of the given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i):
        """Returns the right-child index of the given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i, j):
        # First update the positions of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i, j):
        """Compares the two items' scores using the default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i):
        """Returns the index that should be the parent among index i and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index):
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index):
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value):
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last element into the freed slot.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top [item, score] pair from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns the top [item, score] pair from the heap and removes it as well."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
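    # A small usage sketch (added; not in the original file): the `key`
    # callable turns the min-heap into a max-heap by negating scores.
    heap = Heap(key=lambda x: -x)
    heap.insert_item(5, 34)
    heap.insert_item(6, 31)
    heap.insert_item(7, 37)
    print(heap.get_top())  # [7, -37]: item 7 has the largest raw value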
| 476 | 0 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-rename basic JAX keys to their PyTorch equivalents."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # Note: the exact key rewrite here is an assumption; the original
        # right-hand side survived but the dictionary key expression did not.
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
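# Example of the resulting shard layout (added; the shard count is illustrative):
#
#   pytorch_model-00001-of-00072.bin
#   pytorch_model-00002-of-00072.bin
#   ...
#   pytorch_model.bin.index.json   # {"metadata": {"total_size": ...}, "weight_map": {...}}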
| 715 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan for the minimum: O(mn)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a binary heap: O((m + n) log n)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
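    # A small usage sketch (added; not in the original file): a triangle graph
    # where Prim's algorithm keeps the two cheapest edges.
    graph = [Vertex(i) for i in range(3)]
    connect(graph, 1, 2, 1)
    connect(graph, 2, 3, 2)
    connect(graph, 1, 3, 4)
    print(prim(graph, graph[0]))            # expected: [(2, 1), (3, 2)]
    print(list(prim_heap(graph, graph[0])))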
| 401 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = mask_token

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
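# A minimal usage sketch (added; not in the original file):
#
#   tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   ids = tokenizer("Hello world").input_ids
#   assert ids[0] == tokenizer.bos_token_id and ids[-1] == tokenizer.eos_token_id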
| 227 |
"""simple docstring"""
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def UpperCAmelCase ( snake_case : Optional[Any] , snake_case : Optional[int] ):
assert isinstance(snake_case , snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def UpperCAmelCase ( snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : Tuple ):
_lowerCAmelCase:List[str] = tmp_path / '''cache'''
_lowerCAmelCase:str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCAmelCase:List[Any] = SqlDatasetReader(
'''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case , keep_in_memory=snake_case ).read()
_check_sql_dataset(snake_case , snake_case )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def UpperCAmelCase ( snake_case : List[Any] , snake_case : List[Any] , snake_case : Any , snake_case : Tuple ):
_lowerCAmelCase:Union[str, Any] = tmp_path / '''cache'''
_lowerCAmelCase:Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCAmelCase:List[Any] = features.copy() if features else default_expected_features
_lowerCAmelCase:Dict = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCAmelCase:Union[str, Any] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=snake_case , cache_dir=snake_case ).read()
_check_sql_dataset(snake_case , snake_case )
def UpperCAmelCase ( snake_case : List[str] ):
with contextlib.closing(sqlitea.connect(snake_case ) ) as con:
_lowerCAmelCase:Tuple = con.cursor()
cur.execute('''SELECT * FROM dataset''' )
for row in cur:
yield row
@require_sqlalchemy
def UpperCAmelCase ( snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Dict ):
_lowerCAmelCase:Dict = tmp_path / '''cache'''
_lowerCAmelCase:Optional[int] = os.path.join(snake_case , '''tmp.sql''' )
_lowerCAmelCase:Union[str, Any] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case ).read()
SqlDatasetWriter(snake_case , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write()
_lowerCAmelCase:int = iter_sql_file(snake_case )
_lowerCAmelCase:Any = iter_sql_file(snake_case )
for rowa, rowa in zip(snake_case , snake_case ):
assert rowa == rowa
@require_sqlalchemy
def UpperCAmelCase ( snake_case : Optional[int] , snake_case : Any , snake_case : Union[str, Any] ):
_lowerCAmelCase:Dict = tmp_path / '''cache'''
_lowerCAmelCase:Any = os.path.join(snake_case , '''tmp.sql''' )
_lowerCAmelCase:int = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case ).read()
SqlDatasetWriter(snake_case , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write()
_lowerCAmelCase:List[str] = iter_sql_file(snake_case )
_lowerCAmelCase:Tuple = iter_sql_file(snake_case )
for rowa, rowa in zip(snake_case , snake_case ):
assert rowa == rowa
@require_sqlalchemy
def UpperCAmelCase ( snake_case : Dict , snake_case : Tuple , snake_case : Optional[int] ):
_lowerCAmelCase:List[str] = tmp_path / '''cache'''
_lowerCAmelCase:List[str] = os.path.join(snake_case , '''tmp.sql''' )
_lowerCAmelCase:Union[str, Any] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=snake_case ).read()
with pytest.raises(snake_case ):
SqlDatasetWriter(snake_case , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
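# --- Usage sketch (added): a minimal round trip through the reader/writer the
# tests above exercise, via the public Dataset.to_sql / Dataset.from_sql wrappers.
# Hedged: "example.db" and the rows are illustrative; requires sqlalchemy.
if __name__ == "__main__":
    example_ds = Dataset.from_dict(
        {"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4], "col_3": [1.0, 2.0, 3.0, 4.0]}
    )
    example_ds.to_sql("dataset", "sqlite:///example.db")  # writes through SqlDatasetWriter
    round_trip = Dataset.from_sql("dataset", "sqlite:///example.db")  # reads through SqlDatasetReader
    assert round_trip.column_names == example_ds.column_names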
| 227 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
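# Usage sketch (added): instantiating the config above directly. Hedged: the
# small hidden_size/num_hidden_layers values are illustrative, not a released
# checkpoint; real configs should come from RwkvConfig.from_pretrained.
def _example_rwkv_config():
    cfg = RwkvConfig(hidden_size=768, num_hidden_layers=12)
    # attention_hidden_size falls back to hidden_size, intermediate_size to 4x it
    assert cfg.attention_hidden_size == 768 and cfg.intermediate_size == 4 * 768
    # attribute_map lets `max_position_embeddings` alias `context_length`
    assert cfg.max_position_embeddings == cfg.context_length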
| 533 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 533 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
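# Usage sketch (added, hedged: shows only the defaults above, for illustration).
def _example_van_config():
    cfg = VanConfig()
    # four stages: one entry per stage in each of these per-stage lists
    assert len(cfg.patch_sizes) == len(cfg.strides) == len(cfg.hidden_sizes) == len(cfg.depths) == 4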
| 578 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 387 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Möbius function: -1/+1 for square-free numbers with an odd/even number of prime factors, else 0."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
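# Usage sketch (added): a few values of the Möbius function computed above.
# Hedged: relies on the repo-local helpers imported at the top behaving as named.
def _mobius_examples():
    assert mobius(7) == -1  # one prime factor -> odd count
    assert mobius(6) == 1  # 2 * 3 -> even count of distinct primes
    assert mobius(12) == 0  # 2^2 * 3 is not square-free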
| 719 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Elementwise tanh, written via the logistic sigmoid: tanh(x) = 2*sigmoid(2x) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
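# Quick check (added): the sigmoid identity above agrees with np.tanh; a sketch,
# not part of the original module.
def _tanh_check():
    xs = np.linspace(-3, 3, 7)
    assert np.allclose(tangent_hyperbolic(xs), np.tanh(xs))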
| 196 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1_024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform, mel_filters=None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform, max_length, truncation, padding):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length+hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer

    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
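# Usage sketch (added, hedged): one second of synthetic mono audio through the
# extractor above. The printed shape follows from feature_size=64 and the 4-way
# "fusion" stacking, but is illustrative rather than a documented contract.
def _example_clap_features():
    extractor = ClapFeatureExtractor()
    fake_audio = np.random.randn(48_000)  # 1 s at the default 48 kHz
    features = extractor(fake_audio, sampling_rate=48_000, return_tensors="np")
    print(features["input_features"].shape)  # (batch, 4, frames, 64) under "fusion"
    print(features["is_longer"])  # one boolean flag per input clip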
| 167 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
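# Illustration (added): the special-token layout the helpers above produce, shown
# with made-up ids instead of a real checkpoint (hedged: [CLS]=65, [SEP]=66 are
# invented for the sketch).
#   single sequence: [CLS] A [SEP]        -> token_type_ids all 0
#   sequence pair:   [CLS] A [SEP] B [SEP] -> 0s over the first segment, 1s after
def _special_tokens_layout_example():
    cls, sep = [65], [66]
    a, b = [1, 2, 3], [4, 5]
    assert len(cls + a + sep) * [0] + len(b + sep) * [1] == [0, 0, 0, 0, 0, 1, 1, 1]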
| 167 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    """simple docstring"""

    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.config_overrides is not None:
        logger.info(f"Overriding config: {model_args.config_overrides}")
        config.update_from_string(model_args.config_overrides)
        logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
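# Worked example (added): the linear LR scaling rule used above,
# absolute_lr = base_lr * total_train_batch_size / 256. Hedged: the batch
# numbers below are illustrative, not defaults of this script.
def _example_absolute_lr(base_lr=1e-3, per_device=64, grad_accum=2, world_size=2):
    total_train_batch_size = per_device * grad_accum * world_size  # 256 here
    return base_lr * total_train_batch_size / 256  # equals base_lr when the total is 256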
| 235 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Neville's iterated interpolation: returns [value at x0, full tableau]."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
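# Usage sketch (added): the five points below lie on y = x + 5, and Neville's
# scheme reproduces polynomials of degree < n exactly, so x0 = 5 yields 10.
def _neville_example():
    value, _table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    assert abs(value - 10.0) < 1e-9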
| 235 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
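# Note (added): the _LazyModule assignment above defers the heavy torch/tokenizers
# imports until an attribute is first touched. A sketch of the access pattern
# (hedged: illustrative, assuming the standard transformers package layout):
#   from transformers.models.bloom import BloomConfig  # cheap: config module only
#   from transformers.models.bloom import BloomModel   # first touch imports modeling_bloom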
| 485 |
def power(base: int, exponent: int) -> float:
    return base * power(base, (exponent - 1)) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
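# Usage sketch (added): power() is O(exponent) linear recursion; power(3, 4)
# expands to 3 * 3 * 3 * 3 * 1 == 81. Negative exponents are handled by the
# caller above via 1 / power(base, abs(exponent)).
def _power_examples():
    assert power(3, 4) == 81
    assert power(2, 0) == 1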
| 544 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 658 |
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption under every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
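# Usage sketch (added): brute-forcing a shift-3 ciphertext prints all 26
# candidates; the readable plaintext appears at Key #3.
def _decrypt_example():
    decrypt("WKLV LV D VHFUHW PHVVDJH")  # "THIS IS A SECRET MESSAGE" at key 3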
| 658 | 1 |
def remove_digit(num: int) -> int:
    """Return the biggest result obtainable by removing exactly one digit from num."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 145 |
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float):
    # normal_gradient: gradient of the ellipse normal at the point of incidence
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(f"""{solution() = }""")
| 145 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 714 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-rename basic flax keys to their PyTorch equivalents."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
_a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
_a : Optional[int] = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
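# Worked example (added, hedged: toy numbers): how the sharding loop above
# decides to cut a new file. dtype_byte_size(torch.bfloat16) is 2, so a
# 1000 x 1000 bf16 tensor contributes 2_000_000 bytes toward max_shard_size.
def _shard_math_example():
    weight = torch.zeros(1000, 1000, dtype=torch.bfloat16)
    weight_size = weight.numel() * dtype_byte_size(weight.dtype)
    assert weight_size == 2_000_000
    assert convert_file_size_to_int("10GB") == 10 * 10**9  # "GB" is decimal here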
| 87 | 0 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_UpperCAmelCase : List[Any] = get_logger(__name__)
class UpperCAmelCase :
"""simple docstring"""
A__ : Optional[int] = 'dummy_data'
A__ : Dict = 'datasets'
A__ : List[Any] = False
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case = None , _snake_case = False , _snake_case = True , _snake_case = None , ) -> Any:
_UpperCamelCase : Tuple = 0
_UpperCamelCase : Any = dataset_name
_UpperCamelCase : int = cache_dir
_UpperCamelCase : Optional[Any] = use_local_dummy_data
_UpperCamelCase : str = config
# download_callbacks take a single url as input
_UpperCamelCase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_UpperCamelCase : Optional[int] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_UpperCamelCase : Union[str, Any] = str(lowercase_ )
# to be downloaded
_UpperCamelCase : str = None
_UpperCamelCase : str = None
@property
def _lowercase ( self ) -> Optional[int]:
if self._dummy_file is None:
_UpperCamelCase : Tuple = self.download_dummy_data()
return self._dummy_file
@property
def _lowercase ( self ) -> int:
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def _lowercase ( self ) -> Optional[Any]:
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : List[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_UpperCamelCase : Optional[int] = cached_path(
lowercase_ , cache_dir=self.cache_dir , extract_compressed_file=lowercase_ , force_extract=lowercase_ )
return os.path.join(lowercase_ , self.dummy_file_name )
@property
def _lowercase ( self ) -> Dict:
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def _lowercase ( self ) -> Any:
if self._bucket_url is None:
_UpperCamelCase : Dict = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def _lowercase ( self ) -> Union[str, Any]:
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def _lowercase ( self , _snake_case , *_snake_case ) -> Any:
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_UpperCamelCase : str = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_UpperCamelCase : Optional[int] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(lowercase_ , lowercase_ ):
return self.create_dummy_data_dict(lowercase_ , lowercase_ )
elif isinstance(lowercase_ , (list, tuple) ):
return self.create_dummy_data_list(lowercase_ , lowercase_ )
else:
return self.create_dummy_data_single(lowercase_ , lowercase_ )
def _lowercase ( self , _snake_case , *_snake_case ) -> Optional[Any]:
return self.download_and_extract(lowercase_ )
def _lowercase ( self , _snake_case , _snake_case ) -> str:
return self.download_and_extract(lowercase_ )
def _lowercase ( self , _snake_case , *_snake_case , **_snake_case ) -> Any:
return path
def _lowercase ( self ) -> Any:
return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
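
# Illustrative usage (a minimal sketch, not part of the original module; assumes this
# is the tail of datasets' MockDownloadManager, and the dataset name and URL below are
# hypothetical). The manager mirrors the real DownloadManager API but resolves every
# URL to a file shipped inside the local dummy_data.zip:
#   dl_manager = MockDownloadManager("my_dataset", config=None, version="1.0.0")
#   local_path = dl_manager.download_and_extract("https://example.com/train.json")
#   # -> os.path.join(dl_manager.dummy_file, "train.json")  (url-quoted last path component)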
| 683 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 670 | 0 |
"""simple docstring"""
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r'^(?P<major>\d+)' r'\.(?P<minor>\d+)' r'\.(?P<patch>\d+)$')
@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return F'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(F'''{other} (type {type(other)}) cannot be compared to version.''')

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str
def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) integer tuple parsed from a version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(F'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''')
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])
def _version_tuple_to_str(version_tuple):
    """Join a version tuple back into an 'x.y.z' string."""
    return ".".join(str(v) for v in version_tuple)
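

if __name__ == "__main__":
    # A minimal usage sketch (not part of the original module): Version compares
    # semantic version strings numerically rather than lexicographically.
    assert Version("1.2.0") < Version("1.10.0")
    assert Version("1.2.0") == "1.2.0"  # strings are coerced via _validate_operand
    print(Version("1.2.0").tuple)  # -> (1, 2, 0)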
| 719 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Return the URL of a file inside a dataset repository on the Hugging Face Hub."""
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
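
# Illustrative usage (a minimal sketch; the repo id and file path below are hypothetical):
#   url = hf_hub_url("user/my_dataset", "data/train (1).csv", revision="main")
# On huggingface_hub < 0.11.0 the space in the file name is quoted here first;
# newer releases url-encode the path themselves.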
| 132 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roformer_fast'] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roformer'] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roformer'] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roformer'] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 556 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 556 | 1 |
'''simple docstring'''
def get_demo_graph(index):
    """Return one of four demo graphs (adjacency lists), selected by index."""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph):
    """Return the bridges of an undirected graph given as an adjacency list."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
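    # Illustrative run (a minimal sketch): the first demo graph has exactly the
    # bridges (2, 3), (2, 5) and (3, 4) -- the edges joining its two cycles.
    print(compute_bridges(get_demo_graph(0)))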
| 703 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__(self, fpn_feature_size: int = 256, mask_feature_size: int = 256, no_object_weight: float = 0.1, use_auxiliary_loss: bool = False, backbone_config: Optional[Dict] = None, decoder_config: Optional[Dict] = None, init_std: float = 0.02, init_xavier_std: float = 1.0, dice_weight: float = 1.0, cross_entropy_weight: float = 1.0, mask_weight: float = 20.0, output_auxiliary_logits: Optional[bool] = None, **kwargs):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=["stage1", "stage2", "stage3", "stage4"])
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}")
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}")
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, decoder_config=decoder_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
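
# Illustrative usage (a minimal sketch, not part of the original module):
#   config = MaskFormerConfig()  # Swin backbone + DETR decoder defaults
#   assert config.backbone_config.model_type == "swin"
#   assert config.decoder_config.model_type == "detr"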
| 405 | 0 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to max_perimeter, the right triangles with integer sides."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(limit: int = 1000) -> int:
    """Return the perimeter <= limit with the maximum number of Pythagorean triplets."""
    triplets = pythagorean_triple(limit)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
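    # For the default limit of 1000 the maximizing perimeter is 840
    # (the known answer to Project Euler problem 39).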
| 271 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function: str, starting_point: complex, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1) -> complex:
    """Find a root of `function` in `variable`, starting from `starting_point`, via Newton-Raphson."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError('''Could not find root''') from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson('exp(x) - 1', 1_0, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 271 | 1 |
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU activation: identity for positive inputs, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
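    # Illustrative values (a minimal sketch): negatives map to alpha * (exp(x) - 1),
    # non-negatives pass through unchanged, e.g.
    #   exponential_linear_unit(np.array([-1.0, 0.0, 2.0]), alpha=1.0)
    #   ~> array([-0.6321, 0., 2.])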
| 332 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
| 332 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, '''w''') as fp:
            fp.write('''\n'''.join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_2 = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 100 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = """mock-s3-bucket"""
    dataset_path = f's3://{mock_bucket}'
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("""s3://""") is False

    dataset_path = """./local/path"""
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("""file""")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("""compression_fs_class""", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bz2_file, """lz4""": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(""".""")]
    assert fs.glob("""*""") == [expected_filename]
    with fs.open(expected_filename, """r""", encoding="""utf-8""") as f, open(text_file, encoding="""utf-8""") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("""protocol""", ["""zip""", """gzip"""])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = """dataset.jsonl"""
    path = f'''{protocol}://{member_file_path}::{compressed_file_path}'''
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("""non_existing_""" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("""*""")) == [".gitattributes", "data"]
    assert hffs.isdir("""data""")
    assert hffs.isfile(""".gitattributes""") and hffs.isfile("""data/text_data.txt""")
    with open(text_file) as f:
        assert hffs.open("""data/text_data.txt""", """r""").read() == f.read()
def test_fs_overwrites():
    protocol = """bz2"""
    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)
    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
    )
| 479 | 0 |
'''simple docstring'''
TEXT_TO_IMAGE_PARAMS = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(['image'])
IMAGE_VARIATION_PARAMS = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(['image'])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(['prompt', 'image', 'negative_prompt'])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
IMAGE_INPAINTING_PARAMS = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['image', 'mask_image'])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['example_image', 'image', 'mask_image'])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(['class_labels'])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(['class_labels'])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(['input_tokens'])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(['input_tokens'])
| 220 |
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the ITU-R 601-2 luma transform."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a boolean mask."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image by the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1))
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert('RGB')
    pil_img.save('result_dilation.png')
| 220 | 1 |
def pancake_sort(arr):
    """Sort a list in place-order by repeatedly flipping prefixes (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
return arr
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
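    # e.g. entering "3,1,2" prints [1, 2, 3]; each pass flips the current maximum
    # to the front and then into its final position, so at most 2 * (n - 1) flips occur.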
| 99 |
'''simple docstring'''
def solution():
    """Return how many Sundays fell on the first of the month during the 20th century."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
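    # Expected output: 171 -- the number of Sundays that fell on the first of the
    # month during the twentieth century (Project Euler problem 19).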
| 675 | 0 |
__all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 71 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
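

if __name__ == "__main__":
    # A minimal usage sketch (not part of the original module): build a 1 kHz
    # low-pass biquad for 48 kHz audio and run a few samples through it. This
    # assumes IIRFilter exposes a per-sample `process` method, as in the
    # TheAlgorithms `audio_filters.iir_filter` implementation imported above.
    filt = make_lowpass(1_000, 48_000)
    print([round(filt.process(sample), 6) for sample in (0.0, 1.0, 0.5, -0.5)])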
| 71 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'apply_ocr'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='pt')
        self.assertEqual(
            encoding.pixel_values.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ))
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ))
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ))
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ))
    def test_LayoutLMv3_integration_test(self):
        image_processing = LayoutLMv3ImageProcessor()
        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')
        image = Image.open(ds[0]['file']).convert('RGB')
        encoding = image_processing(image, return_tensors='pt')
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
__A : str = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
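        expected_words = __A  # bind the OCR words before the next assignment reuses the name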
__A : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
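        expected_boxes = __A  # bind the OCR boxes for the assertions below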
        # fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors='pt')
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 8 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized model (and its tokenizer) for the given config name."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 8 | 1 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178_7059_6456_5277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.2_2342_9068_9229_9652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.1_6226_2890_1481_6284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.1_6688_3260_0116_7297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560_5306_6253_6621) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.2_1805_9346_0798_2635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.2_2342_9083_8241_5771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
| 719 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__UpperCAmelCase = logging.get_logger(__name__)
class _a ( BaseImageProcessor ):
"""simple docstring"""
A = ['pixel_values']
def __init__( self ,__SCREAMING_SNAKE_CASE = True ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR ,__SCREAMING_SNAKE_CASE = True ,__SCREAMING_SNAKE_CASE = 1 / 255 ,__SCREAMING_SNAKE_CASE = True ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = True ,**__SCREAMING_SNAKE_CASE ,):
super().__init__(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Tuple = size if size is not None else {'shortest_edge': 224}
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Any = crop_size if crop_size is not None else {'height': 256, 'width': 256}
SCREAMING_SNAKE_CASE : str = get_size_dict(__SCREAMING_SNAKE_CASE ,param_name='crop_size' )
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : Dict = size
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : List[str] = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[Any] = do_center_crop
SCREAMING_SNAKE_CASE : Any = crop_size
SCREAMING_SNAKE_CASE : List[str] = do_flip_channel_order
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = PIL.Image.BILINEAR ,__SCREAMING_SNAKE_CASE = None ,**__SCREAMING_SNAKE_CASE ,):
SCREAMING_SNAKE_CASE : int = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : str = get_resize_output_image_size(__SCREAMING_SNAKE_CASE ,size=size['shortest_edge'] ,default_to_square=__SCREAMING_SNAKE_CASE )
return resize(__SCREAMING_SNAKE_CASE ,size=__SCREAMING_SNAKE_CASE ,resample=__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ,**__SCREAMING_SNAKE_CASE ,):
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__SCREAMING_SNAKE_CASE ,size=(size['height'], size['width']) ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ,**__SCREAMING_SNAKE_CASE ,):
return rescale(__SCREAMING_SNAKE_CASE ,scale=__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ):
return flip_channel_order(__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,**__SCREAMING_SNAKE_CASE ,):
SCREAMING_SNAKE_CASE : Tuple = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Dict = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : int = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : Union[str, Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Dict = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : Tuple = get_size_dict(__SCREAMING_SNAKE_CASE ,param_name='crop_size' )
SCREAMING_SNAKE_CASE : Union[str, Any] = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Optional[Any] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Tuple = [self.resize(image=__SCREAMING_SNAKE_CASE ,size=__SCREAMING_SNAKE_CASE ,resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE : List[str] = [self.center_crop(image=__SCREAMING_SNAKE_CASE ,size=__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Dict = [self.rescale(image=__SCREAMING_SNAKE_CASE ,scale=__SCREAMING_SNAKE_CASE ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
SCREAMING_SNAKE_CASE : Optional[int] = [self.flip_channel_order(image=__SCREAMING_SNAKE_CASE ) for image in images]
SCREAMING_SNAKE_CASE : Dict = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = {'pixel_values': images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE ,tensor_type=__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ):
SCREAMING_SNAKE_CASE : Tuple = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : Tuple = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[Any] = []
for idx in range(len(__SCREAMING_SNAKE_CASE ) ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
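# Hedged usage sketch (illustrative only: the class and method names above are
# placeholders, so this mirrors the usual BaseImageProcessor call pattern rather
# than a literal, runnable API):
#   processor = ImageProcessor()                     # the class defined above
#   batch = processor.preprocess(pil_image, return_tensors="pt")
#   batch["pixel_values"].shape                      # (1, 3, 256, 256) after the crop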
| 220 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase):
__lowercase : Any = GPTaTokenizer
__lowercase : List[str] = GPTaTokenizerFast
__lowercase : Optional[int] = True
__lowercase : Any = {'''add_prefix_space''': True}
__lowercase : List[Any] = False
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
__snake_case = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
__snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__snake_case = {'''unk_token''': '''<unk>'''}
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
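        # Hedged walk-through of the toy BPE above: " lower" maps its leading space to
        # \u0120, then the merges \u0120+l, \u0120l+o, \u0120lo+w and e+r yield the
        # tokens ["\u0120low", "er"], matching the expectations in the tests below.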
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
__snake_case = '''lower newer'''
__snake_case = '''lower newer'''
return input_text, output_text
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__snake_case = '''lower newer'''
__snake_case = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokens + [tokenizer.unk_token]
__snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer(add_prefix_space=__SCREAMING_SNAKE_CASE )
__snake_case = '''lower newer'''
# Testing tokenization
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
__snake_case = self.get_rust_tokenizer(add_prefix_space=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing the unknown token
__snake_case = tokens + [rust_tokenizer.unk_token]
__snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE=15 ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# Simple input
__snake_case = '''This is a simple input'''
__snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
__snake_case = ('''This is a simple input''', '''This is a pair''')
__snake_case = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Simple input
self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Simple input
self.assertRaises(
__SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='''max_length''' , )
# Pair input
self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Pair input
self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Pair input
self.assertRaises(
__SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding='''max_length''' , )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
__snake_case = '''This is a simple input'''
__snake_case = ['''This is a simple input looooooooong''', '''This is a simple input''']
__snake_case = ('''This is a simple input''', '''This is a pair''')
__snake_case = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
__snake_case = tokenizer.pad_token_id
__snake_case = tokenizer(__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
__snake_case = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncate=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
__snake_case = tokenizer(*__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
__snake_case = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncate=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = '''$$$'''
__snake_case = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__SCREAMING_SNAKE_CASE , add_bos_token=__SCREAMING_SNAKE_CASE )
__snake_case = '''This is a simple input'''
__snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
__snake_case = tokenizer.bos_token_id
__snake_case = tokenizer(__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer(__SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , __SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__snake_case = tokenizer.decode(out_s.input_ids )
__snake_case = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = [self.get_tokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , add_bos_token=__SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__snake_case = '''Encode this.'''
__snake_case = '''This one too please.'''
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode_plus(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_special_tokens_mask=__SCREAMING_SNAKE_CASE , )
__snake_case = encoded_sequence_dict['''input_ids''']
__snake_case = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
__snake_case = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__SCREAMING_SNAKE_CASE )
]
__snake_case = [x for x in filtered_sequence if x is not None]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=__SCREAMING_SNAKE_CASE )
__snake_case = '''A photo of a cat'''
__snake_case = tokenizer.encode(
__SCREAMING_SNAKE_CASE , )
self.assertEqual(__SCREAMING_SNAKE_CASE , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('''test_opt''' )
__snake_case = AutoTokenizer.from_pretrained('''./test_opt''' )
__snake_case = tokenizer.encode(
__SCREAMING_SNAKE_CASE , )
self.assertEqual(__SCREAMING_SNAKE_CASE , [2, 250, 1345, 9, 10, 4758] )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=__SCREAMING_SNAKE_CASE )
__snake_case = '''A photo of a cat'''
__snake_case = tokenizer.encode(
__SCREAMING_SNAKE_CASE , )
# Same as above
self.assertEqual(__SCREAMING_SNAKE_CASE , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=__SCREAMING_SNAKE_CASE )
__snake_case = '''bos'''
__snake_case = tokenizer.get_vocab()['''bos''']
__snake_case = '''A photo of a cat'''
__snake_case = tokenizer.encode(
__SCREAMING_SNAKE_CASE , )
# We changed the bos token
self.assertEqual(__SCREAMING_SNAKE_CASE , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('''./tok''' )
__snake_case = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
__snake_case = tokenizer.encode(
__SCREAMING_SNAKE_CASE , )
self.assertEqual(__SCREAMING_SNAKE_CASE , [3_1957, 250, 1345, 9, 10, 4758] )
| 24 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
UpperCAmelCase__ : Optional[int] = False
@skip_mps
class UpperCamelCase_ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ = StableDiffusionAttendAndExcitePipeline
UpperCamelCase_ = False
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} )
UpperCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def lowerCAmelCase__ ( cls) -> str:
super().setUpClass()
        torch.use_deterministic_algorithms(True)
@classmethod
def lowerCAmelCase__ ( cls) -> Tuple:
super().tearDownClass()
        torch.use_deterministic_algorithms(False)
def lowerCAmelCase__ ( self) -> Any:
torch.manual_seed(0)
UpperCamelCase__ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase , )
UpperCamelCase__ : Any = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase , set_alpha_to_one=UpperCamelCase , )
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0)
UpperCamelCase__ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
UpperCamelCase__ : List[Any] = CLIPTextModel(UpperCamelCase)
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
UpperCamelCase__ : Tuple = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCAmelCase__ ( self , UpperCamelCase , UpperCamelCase=0) -> str:
if str(UpperCamelCase).startswith('mps'):
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCamelCase)
else:
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCamelCase).manual_seed(UpperCamelCase)
UpperCamelCase__ : List[Any] = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
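    # Hedged note on the Attend-and-Excite inputs above: token_indices=[2, 5] picks the
    # "cat" and "frog" tokens whose cross-attention maps get boosted, max_iter_to_alter
    # caps how many early denoising steps update the latents, and thresholds maps a
    # step index to the minimum peak attention required at that step.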
def lowerCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : Union[str, Any] = self.get_dummy_components()
UpperCamelCase__ : Optional[Any] = self.pipeline_class(**UpperCamelCase)
pipe.to(UpperCamelCase)
pipe.set_progress_bar_config(disable=UpperCamelCase)
UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCamelCase)
UpperCamelCase__ : Union[str, Any] = pipe(**UpperCamelCase).images
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3))
UpperCamelCase__ : str = np.array(
[0.6390_5364, 0.6289_7307, 0.4859_9017, 0.513_3624, 0.555_0048, 0.4576_9516, 0.5032_6973, 0.502_3139, 0.4538_4496])
UpperCamelCase__ : List[str] = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(UpperCamelCase , 1E-3)
def lowerCAmelCase__ ( self) -> Dict:
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4)
def lowerCAmelCase__ ( self) -> str:
        # NOTE: larger batch sizes cause this test to time out, so only test smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def lowerCAmelCase__ ( self) -> List[str]:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4)
def lowerCAmelCase__ ( self) -> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)
def lowerCAmelCase__ ( self) -> Optional[Any]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4)
def lowerCAmelCase__ ( self) -> Dict:
super().test_save_load_local(expected_max_difference=5E-4)
def lowerCAmelCase__ ( self) -> Dict:
super().test_save_load_optional_components(expected_max_difference=4E-4)
@require_torch_gpu
@slow
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowerCAmelCase__ ( cls) -> Optional[int]:
super().setUpClass()
        torch.use_deterministic_algorithms(True)
@classmethod
def lowerCAmelCase__ ( cls) -> str:
super().tearDownClass()
        torch.use_deterministic_algorithms(False)
def lowerCAmelCase__ ( self) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self) -> int:
UpperCamelCase__ : Dict = torch.manual_seed(51)
UpperCamelCase__ : Dict = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , safety_checker=UpperCamelCase , torch_dtype=torch.floataa)
pipe.to('cuda')
UpperCamelCase__ : Optional[int] = 'a painting of an elephant with glasses'
UpperCamelCase__ : Dict = [5, 7]
UpperCamelCase__ : Optional[int] = pipe(
prompt=UpperCamelCase , token_indices=UpperCamelCase , guidance_scale=7.5 , generator=UpperCamelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
UpperCamelCase__ : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy')
assert np.abs((expected_image - image).max()) < 5E-1
| 410 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
lowerCamelCase_ : List[str] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
lowerCamelCase_ : List[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def UpperCAmelCase__ (self ):
print(F"""Found {torch.cuda.device_count()} devices.""" )
lowerCamelCase_ : Tuple = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
@require_multi_gpu
def UpperCAmelCase__ (self ):
print(F"""Found {torch.cuda.device_count()} devices.""" )
lowerCamelCase_ : str = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
@require_multi_gpu
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
@require_multi_gpu
def UpperCAmelCase__ (self ):
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
lowerCamelCase_ : Optional[Any] = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
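    # Hedged worked example (two processes assumed): process 0 builds a (2, 10) tensor
    # and process 1 a (3, 10) tensor; pad_across_processes right-pads both along dim 0
    # to (3, 10) with zeros, which is exactly what the checks below verify.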
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 711 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bartpho'''] = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
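# Hedged note: the OptionalDependencyNotAvailable guard above lets this package import
# cleanly when sentencepiece is missing; BartphoTokenizer is simply left out of
# _import_structure instead of failing at import time.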
| 357 | 0 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100_000)]
def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Slight speed-up: the lookup table resolves five digits per iteration.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared
# Every chain eventually settles at either 89 or 1.
# Seeding 58 first (a member of the 89-chain) minimizes the iterations needed to
# resolve the remaining members; the 1-chain contains only the single element 1.
# So 58 and 1 are declared up front.
# The memo was changed from a dictionary to an array to speed up the solution.
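# Worked example of next_number, easy to verify by hand:
#   44 -> 4**2 + 4**2 = 32 -> 9 + 4 = 13 -> 1 + 9 = 10 -> 1   (this chain ends at 1)
#   85 -> 8**2 + 5**2 = 89                                     (this chain ends at 89)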
CHAINS = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89 (5**2 + 8**2 = 89)
def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # Appending zeros does not change the digit-square sum, so every number * 10**k
    # below the limit shares this result and can be cached in the same pass.
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10_000_000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)  # False marks chains that arrive at 89
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution() = }')
| 9 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str=3 , __lowerCamelCase : List[Any]=3_2 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : str=1_0 , __lowerCamelCase : Union[str, Any]=[8, 1_6, 3_2, 6_4] , __lowerCamelCase : Union[str, Any]=[1, 1, 2, 1] , __lowerCamelCase : List[str]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : str="relu" , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : List[str]=["stage2", "stage3", "stage4"] , __lowerCamelCase : List[Any]=[2, 3, 4] , __lowerCamelCase : int=1 , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = embeddings_size
_SCREAMING_SNAKE_CASE = hidden_sizes
_SCREAMING_SNAKE_CASE = depths
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = scope
_SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = out_features
_SCREAMING_SNAKE_CASE = out_indices
_SCREAMING_SNAKE_CASE = num_groups
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowerCAmelCase_ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowerCAmelCase_ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = BitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
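        # Hedged note: with out_features=None a backbone conventionally falls back to
        # returning only its last stage, which is why the second half of this check
        # expects a single feature map with hidden_sizes[-1] channels.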
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase_ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowerCamelCase_ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BitModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
return
@unittest.skip(reason="Bit does not output attentions" )
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase )
for name, module in model.named_modules():
if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : int ):
_SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
_SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE = layer_type
_SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = BitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = self.default_image_processor
_SCREAMING_SNAKE_CASE = prepare_img()
_SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
_SCREAMING_SNAKE_CASE = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@require_torch
class lowercase_ ( BackboneTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase_ = (BitBackbone,) if is_torch_available() else ()
lowerCamelCase_ = BitConfig
lowerCamelCase_ = False
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BitModelTester(self )
| 418 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
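# Hedged sketch of the lazy-import behavior set up above (simplified, not the real
# _LazyModule): attribute access triggers the submodule import on first use only.
#
#   import importlib
#
#   class LazyModule:
#       def __init__(self, name, import_structure):
#           self._name = name
#           self._map = {v: k for k, vs in import_structure.items() for v in vs}
#       def __getattr__(self, attr):          # runs only when `attr` is first accessed
#           module = importlib.import_module("." + self._map[attr], self._name)
#           return getattr(module, attr)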
| 583 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
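# Hedged note: this module exists for backward compatibility, so legacy code such as
# `from transformers.file_utils import ModelOutput` keeps resolving even though the
# canonical home of these symbols is now `transformers.utils`.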
| 583 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    # creates a nested list of random floats in [0, scale)
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester ( unittest.TestCase ):
def __init__( self: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=7 , __lowerCamelCase: str=4_00 , __lowerCamelCase: Union[str, Any]=20_00 , __lowerCamelCase: Optional[int]=24 , __lowerCamelCase: List[Any]=24 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: int=1_60_00 , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: Dict=True , ) -> int:
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : str = batch_size
__UpperCAmelCase : Tuple = min_seq_length
__UpperCAmelCase : str = max_seq_length
__UpperCAmelCase : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCAmelCase : Union[str, Any] = feature_size
__UpperCAmelCase : List[Any] = num_mel_bins
__UpperCAmelCase : Union[str, Any] = padding_value
__UpperCAmelCase : Union[str, Any] = sampling_rate
__UpperCAmelCase : Optional[int] = return_attention_mask
__UpperCAmelCase : Optional[Any] = do_normalize
def _lowerCamelCase ( self: int ) -> List[str]:
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: int=False , __lowerCamelCase: Optional[int]=False ) -> Optional[int]:
def _flatten(__lowerCamelCase: int ):
return list(itertools.chain(*__SCREAMING_SNAKE_CASE ) )
if equal_length:
__UpperCAmelCase : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCAmelCase : Optional[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCAmelCase : Tuple = [np.asarray(__SCREAMING_SNAKE_CASE ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _snake_case ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
lowerCamelCase__: List[Any] = SpeechaTextFeatureExtractor if is_speech_available() else None
def _lowerCamelCase ( self: Tuple ) -> int:
__UpperCAmelCase : str = SpeechaTextFeatureExtractionTester(self )
def _lowerCamelCase ( self: str , __lowerCamelCase: int ) -> List[str]:
self.assertTrue(np.all(np.mean(__SCREAMING_SNAKE_CASE , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__SCREAMING_SNAKE_CASE , axis=0 ) - 1 ) < 1e-3 ) )
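    # Hedged note: the check above is effectively per-utterance mean/variance
    # normalization (CMVN): after `do_normalize`, each feature dimension should have
    # roughly zero mean and unit variance over the valid frames.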
def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
__UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCAmelCase : List[str] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__UpperCAmelCase : Any = [np.asarray(__SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test feature size
__UpperCAmelCase : int = feature_extractor(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
__UpperCAmelCase : Any = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
__UpperCAmelCase : Optional[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test batched
__UpperCAmelCase : str = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors="np" ).input_features
__UpperCAmelCase : Any = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
__UpperCAmelCase : str = np.asarray(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase : Optional[Any] = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors="np" ).input_features
__UpperCAmelCase : str = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
__UpperCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__UpperCAmelCase : Any = ['''longest''', '''max_length''', '''do_not_pad''']
__UpperCAmelCase : Union[str, Any] = [None, 16, None]
for max_length, padding in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase : str = feature_extractor(
__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase : Dict = inputs.input_features
__UpperCAmelCase : Optional[int] = inputs.attention_mask
__UpperCAmelCase : Optional[int] = [np.sum(__SCREAMING_SNAKE_CASE ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _lowerCamelCase ( self: Tuple ) -> int:
__UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__UpperCAmelCase : Optional[Any] = ['''longest''', '''max_length''', '''do_not_pad''']
__UpperCAmelCase : int = [None, 16, None]
for max_length, padding in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase : List[Any] = feature_extractor(
__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors="np" , return_attention_mask=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase : str = inputs.input_features
__UpperCAmelCase : str = inputs.attention_mask
__UpperCAmelCase : Dict = [np.sum(__SCREAMING_SNAKE_CASE ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _lowerCamelCase ( self: int ) -> Dict:
__UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase : Any = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__UpperCAmelCase : Dict = feature_extractor(
__SCREAMING_SNAKE_CASE , padding="max_length" , max_length=4 , truncation=__SCREAMING_SNAKE_CASE , return_tensors="np" , return_attention_mask=__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase : int = inputs.input_features
__UpperCAmelCase : Any = inputs.attention_mask
__UpperCAmelCase : str = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :24], EXPECTED_INPUT_FEATURES, atol=1e-4))
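
# Every test above asserts the same property: after utterance-level mean/variance
# normalization (CMVN), the unpadded frames of each example should have ~0 mean and
# ~1 variance. A minimal, self-contained sketch of that check (the helper name and
# tolerance below are assumptions, not the test suite's):
import numpy as np

def check_zero_mean_unit_variance(feats, atol=1e-3):
    assert abs(feats.mean()) < atol
    assert abs(feats.var() - 1) < atol

rng = np.random.default_rng(0)
raw = rng.normal(loc=5.0, scale=3.0, size=(584, 24))  # stand-in for fbank features
normalized = (raw - raw.mean(axis=0)) / np.sqrt(raw.var(axis=0) + 1e-7)
check_zero_mean_unit_variance(normalized)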
| 382 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
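
# For illustration, the dummy module generated by `create_dummy_files` for a
# hypothetical `torch` backend exposing a single class would look roughly like this
# (the object name is chosen for the example):
#
#     # This file is autogenerated by the command `make fix-copies`, do not edit.
#     from ..utils import DummyObject, requires_backends
#
#     class UNet2DModel(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])
#
#         @classmethod
#         def from_config(cls, *args, **kwargs):
#             requires_backends(cls, ["torch"])
#
#         @classmethod
#         def from_pretrained(cls, *args, **kwargs):
#             requires_backends(cls, ["torch"])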
| 381 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
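
# Sketch of the remote-command assembly performed by `tpu_command_launcher` above,
# with assumed inputs (the command strings are examples only):
def _demo_command_assembly():
    install_accelerate = True
    accelerate_version = "accelerate -U"
    user_commands = ["pip install datasets", "accelerate launch train.py"]

    new_cmd = ["cd /usr/share"]
    if install_accelerate:
        new_cmd += [f"pip install {accelerate_version}"]
    new_cmd += user_commands
    # -> "cd /usr/share; pip install accelerate -U; pip install datasets; accelerate launch train.py"
    return "; ".join(new_cmd)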
| 720 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA1 hashing algorithm"""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate a 32-bit integer n by b bits
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string

    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
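
# Quick cross-check of the class above against the standard library (a sanity
# check mirroring `test_sha1_hash`, with example messages):
def _demo_sha1_cross_check():
    for msg in (b"", b"abc", b"The quick brown fox jumps over the lazy dog"):
        assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324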
| 72 | 0 |
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Prints the choice at the given index"""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Should not be directly called, used to move a direction of either up or down"""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Start the menu and return the result"""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")

        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
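
# Hypothetical usage of the menu above (prompt and choices are examples only):
#
#     menu = BulletMenu(
#         "In which compute environment are you running?",
#         ["This machine", "AWS (Amazon SageMaker)"],
#     )
#     selected_index = menu.run(default_choice=0)
#
# `run` blocks until the user confirms a choice and returns its index.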
| 355 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
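
# Minimal sketch of the MAPPING-based key renaming used in `recursively_load_weights`
# above (the mapping entry and checkpoint key are illustrative):
def _demo_rename(name: str) -> str:
    demo_mapping = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}
    for key, mapped in demo_mapping.items():
        if key in name:
            layer_index = name.split(key)[0].split(".")[-2]
            return mapped.replace("*", layer_index)
    return name

assert _demo_rename("encoder.layers.3.self_attn.k_proj") == "encoder.layers.3.attention.k_proj"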
| 355 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
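
# Sketch: instantiate a smaller-than-default config and round-trip it through the
# standard `save_pretrained`/`from_pretrained` API (directory name is an example):
#
#     config = BioGptConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
#     config.save_pretrained("./biogpt-tiny")   # writes config.json
#     reloaded = BioGptConfig.from_pretrained("./biogpt-tiny")
#     assert reloaded.hidden_size == 256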
| 704 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
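
# For the default task, the `inputs` property above evaluates to (a sketch, assuming
# a default-constructed config):
#
#     onnx_config = ConvBertOnnxConfig(ConvBertConfig())
#     onnx_config.inputs
#     # OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#     #              ("attention_mask", {0: "batch", 1: "sequence"}),
#     #              ("token_type_ids", {0: "batch", 1: "sequence"})])
#
# The dynamic axes tell the ONNX exporter which input dimensions may vary at runtime.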
| 249 | 0 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Generate the mask for the region described by `text` with CLIPSeg
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)

        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 611 |
class SubArray:
    def __init__(self, arr):
        # convert the comma-separated string to a list of numbers
        self.array = arr.split(",")

    def solve_sub_array(self):
        # dynamic programming: best sum ending at i, and best sum seen so far
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the result is:", re))
| 611 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
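
# Simplified sketch of the lazy-module mechanism relied on above: submodules are
# imported only when one of their attributes is first accessed. This is an
# illustration, not the actual `_LazyModule` implementation:
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(attr)
        # the submodule is imported lazily, on first attribute access
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)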
| 713 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
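
# Sketch of a concrete command implementing the interface above (the command name
# and behavior are hypothetical):
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # `parser` is the subparsers action of the root ArgumentParser
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from the CLI")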
| 185 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 35 |
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
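
# Worked examples: a number is "perfect" when it equals the sum of its proper divisors.
assert sum_of_divisors(6) == 6    # 1 + 2 + 3
assert sum_of_divisors(28) == 28  # 1 + 2 + 4 + 7 + 14
assert sum_of_divisors(12) == 16  # 1 + 2 + 3 + 4 + 6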
| 504 | 0 |
"""simple docstring"""
def __snake_case ( SCREAMING_SNAKE_CASE: list[int] , SCREAMING_SNAKE_CASE: str ):
"""simple docstring"""
_lowerCAmelCase = int(SCREAMING_SNAKE_CASE )
# Initialize Result
_lowerCAmelCase = []
# Traverse through all denomination
for denomination in reversed(SCREAMING_SNAKE_CASE ):
# Find denominations
while int(SCREAMING_SNAKE_CASE ) >= int(SCREAMING_SNAKE_CASE ):
total_value -= int(SCREAMING_SNAKE_CASE )
answer.append(SCREAMING_SNAKE_CASE ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
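
# Worked example (greedy change-making; the greedy strategy is optimal for
# canonical coin systems such as the Indian denominations used above):
#
#     find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#     # -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]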
| 491 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None


logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" NLLB tokenizer (backed by HuggingFace's *tokenizers* library).

    By default the tokenization pattern is `<lang_code> <tokens> </s>`; with
    `legacy_behaviour=True` it is `<tokens> </s> <lang_code>` instead.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-language setting.
        - In legacy mode: no prefix and suffix=[eos, src_lang_code].
        - In default mode: prefix=[src_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target-language setting.
        - In legacy mode: no prefix and suffix=[eos, tgt_lang_code].
        - In default mode: prefix=[tgt_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
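# A minimal usage sketch for the tokenizer above, kept commented out so the module stays
# import-only. The checkpoint name matches the maps at the top of the file; everything else
# is standard `PreTrainedTokenizerFast` API, so this is illustrative rather than exhaustive:
#
#   tokenizer = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M")
#   tokenizer.src_lang = "eng_Latn"                       # prefix inputs with the source language code
#   inputs = tokenizer("Hello world", return_tensors="pt")
#   # inputs.input_ids == [eng_Latn code] + <tokens> + [</s>]  (default, non-legacy behaviour)
#   translation_inputs = tokenizer._build_translation_inputs(
#       "Hello world", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )  # also sets forced_bos_token_id so generation starts with the French language code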
| 491 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )
    def tearDown(self):
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_localized_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_unchanged_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
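# A sketch of the convention these tests exercise (class and module names are the ones used
# above): a class in another modeling file annotated with
#     # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel
# must stay identical to `BertLMPredictionHead` once the `Bert->TestModel` replacement is
# applied. `check_copies.is_copy_consistent(fname)` returns the inconsistencies it found, and
# `is_copy_consistent(fname, overwrite=True)` rewrites the drifted copy in place, which is
# what the `overwrite_result` branch of `check_copy_consistency` asserts on.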
| 183 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34_000,))
        audio2 = np.zeros((14_000,))
        return audio_classifier, [audio2, audio]
    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)
    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8_000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8_000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )
    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
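# Minimal usage sketch of the pipeline under test (same tiny checkpoint as test_small_model_pt;
# a raw 1-D float waveform or an {"array", "sampling_rate"} dict are both accepted inputs):
#
#   classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
#   preds = classifier(np.ones((8_000,)), top_k=2)
#   # -> [{"score": ..., "label": ...}, {"score": ..., "label": ...}]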
| 183 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def __UpperCamelCase ( self : List[str] ) -> Dict:
# with apply_OCR = True
A = LayoutLMvaImageProcessor()
from datasets import load_dataset
A = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
A = Image.open(ds[0]['file'] ).convert('RGB' )
A = image_processing(__UpperCamelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
A = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __UpperCamelCase )
self.assertListEqual(encoding.boxes , __UpperCamelCase )
# with apply_OCR = False
A = LayoutLMvaImageProcessor(apply_ocr=__UpperCamelCase )
A = image_processing(__UpperCamelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
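# Usage sketch for the image processor exercised above: with apply_ocr=True (the default) it
# runs Tesseract and returns the OCR'd words and normalized bounding boxes alongside the
# pixel values, as the integration test checks.
#
#   image_processor = LayoutLMvaImageProcessor()            # apply_ocr=True by default
#   encoding = image_processor(image, return_tensors="pt")
#   encoding.pixel_values.shape                              # (1, 3, 224, 224)
#   encoding.words, encoding.boxes                           # OCR results, one list per image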
| 711 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each occupied slot holds a deque so colliding entries are chained instead of overwritten.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
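# Example (sketch): each slot chains up to `charge_factor` values in a deque before the
# collision resolution of the base class kicks in. The constructor arguments and the
# `insert_data` method are assumed to come from the HashTable base class in hash_table.py:
#
#   table = HashTableWithLinkedList(size_table=3, charge_factor=2)
#   for value in (10, 13, 16):
#       table.insert_data(value)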
| 224 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
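# Inference sketch mirroring the integration test above (same public checkpoint):
#
#   image_processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
#   model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
#   inputs = image_processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits        # shape (1, 1000), ImageNet-1k classes
#   predicted_class = logits.argmax(-1).item()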
| 636 |
"""simple docstring"""
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
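# The denoising loop the three full-loop tests share, distilled as a sketch (`model` stands in
# for any epsilon/v-prediction network with a (sample, timestep) -> residual signature):
#
#   scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100, beta_schedule="linear")
#   scheduler.set_timesteps(num_inference_steps=10)
#   sample = noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       residual = model(model_input, t)
#       sample = scheduler.step(residual, t, sample).prev_sample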
| 636 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict
def snake_case ( self : Any )-> List[Any]:
lowerCamelCase__ : str =YolosModelTester(self )
lowerCamelCase__ : Optional[int] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
def snake_case ( self : Optional[int] )-> Optional[Any]:
self.config_tester.run_common_tests()
def snake_case ( self : List[str] )-> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def snake_case ( self : List[str] )-> Optional[Any]:
lowerCamelCase__ , lowerCamelCase__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] =model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowerCamelCase__ : Optional[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase, nn.Linear ) )
def snake_case ( self : str )-> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[str] =model_class(lowerCamelCase )
lowerCamelCase__ : Dict =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Tuple =[*signature.parameters.keys()]
lowerCamelCase__ : Any =['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def snake_case ( self : List[str] )-> int:
lowerCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def snake_case ( self : Dict )-> int:
lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[int] =True
# in YOLOS, the seq_len is different
lowerCamelCase__ : List[str] =self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCamelCase__ : List[Any] =True
lowerCamelCase__ : Any =False
lowerCamelCase__ : Any =True
lowerCamelCase__ : Optional[int] =model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Tuple =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowerCamelCase__ : Dict =outputs.attentions
self.assertEqual(len(lowerCamelCase ), self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase__ : List[Any] =True
lowerCamelCase__ : List[Any] =model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Any =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowerCamelCase__ : Union[str, Any] =outputs.attentions
self.assertEqual(len(lowerCamelCase ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
lowerCamelCase__ : Union[str, Any] =len(lowerCamelCase )
# Check attention is always last and order is fine
lowerCamelCase__ : List[Any] =True
lowerCamelCase__ : Optional[Any] =True
lowerCamelCase__ : List[str] =model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Optional[int] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowerCamelCase__ : int =1
self.assertEqual(out_len + added_hidden_states, len(lowerCamelCase ) )
lowerCamelCase__ : Optional[Any] =outputs.attentions
self.assertEqual(len(lowerCamelCase ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
def snake_case ( self : List[Any] )-> Union[str, Any]:
def check_hidden_states_output(lowerCamelCase : str, lowerCamelCase : Dict, lowerCamelCase : str ):
lowerCamelCase__ : Optional[Any] =model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : List[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowerCamelCase__ : int =outputs.hidden_states
lowerCamelCase__ : Tuple =getattr(
self.model_tester, '''expected_num_hidden_layers''', self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
# YOLOS has a different seq_length
lowerCamelCase__ : Optional[Any] =self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
lowerCamelCase__ , lowerCamelCase__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Tuple =True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : List[str] =True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : Union[str, Any] )-> str:
lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*lowerCamelCase )
@slow
def snake_case ( self : Optional[Any] )-> Dict:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Any =YolosModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case ( self : Tuple )-> Union[str, Any]:
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def snake_case ( self : Union[str, Any] )-> str:
lowerCamelCase__ : str =YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(lowerCamelCase )
lowerCamelCase__ : Tuple =self.default_image_processor
lowerCamelCase__ : List[str] =prepare_img()
lowerCamelCase__ : Optional[Any] =image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowerCamelCase__ : List[str] =model(inputs.pixel_values )
# verify outputs
lowerCamelCase__ : Tuple =torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowerCamelCase__ : Optional[int] =torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]], device=lowerCamelCase, )
lowerCamelCase__ : int =torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]], device=lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], lowerCamelCase, atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], lowerCamelCase, atol=1E-4 ) )
# verify postprocessing
lowerCamelCase__ : Union[str, Any] =image_processor.post_process_object_detection(
lowerCamelCase, threshold=0.3, target_sizes=[image.size[::-1]] )[0]
lowerCamelCase__ : Optional[Any] =torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(lowerCamelCase )
lowerCamelCase__ : Tuple =[75, 75, 17, 63, 17]
lowerCamelCase__ : Tuple =torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(lowerCamelCase )
self.assertEqual(len(results['''scores'''] ), 5 )
self.assertTrue(torch.allclose(results['''scores'''], lowerCamelCase, atol=1E-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist(), lowerCamelCase )
self.assertTrue(torch.allclose(results['''boxes'''][0, :], lowerCamelCase ) )
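# Note on the postprocessing step above: `post_process_object_detection`
# converts the model's normalized (center_x, center_y, width, height)
# `pred_boxes` into absolute (x0, y0, x1, y1) pixel boxes, which is why the
# test passes `target_sizes=[image.size[::-1]]` - PIL reports (width, height)
# while the post-processor expects (height, width).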
| 625 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def snake_case__ ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : int ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =x
lowerCamelCase__ : Any =y
for step in range(__lowerCamelCase ): # noqa: B007
lowerCamelCase__ : List[Any] =a * a - b * b + x
lowerCamelCase__ : Optional[int] =2 * a * b + y
lowerCamelCase__ : Union[str, Any] =a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def snake_case__ ( __lowerCamelCase : float ):
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def snake_case__ ( __lowerCamelCase : float ):
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(__lowerCamelCase , 1 , 1 ) )
def snake_case__ ( __lowerCamelCase : int = 800 , __lowerCamelCase : int = 600 , __lowerCamelCase : float = -0.6 , __lowerCamelCase : float = 0 , __lowerCamelCase : float = 3.2 , __lowerCamelCase : int = 50 , __lowerCamelCase : bool = True , ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =Image.new('''RGB''' , (image_width, image_height) )
lowerCamelCase__ : Optional[int] =img.load()
# loop through the image-coordinates
for image_x in range(__lowerCamelCase ):
for image_y in range(__lowerCamelCase ):
# determine the figure-coordinates based on the image-coordinates
lowerCamelCase__ : Optional[Any] =figure_width / image_width * image_height
lowerCamelCase__ : Dict =figure_center_x + (image_x / image_width - 0.5) * figure_width
lowerCamelCase__ : Optional[int] =figure_center_y + (image_y / image_height - 0.5) * figure_height
lowerCamelCase__ : Any =get_distance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowerCamelCase__ : int =get_color_coded_rgb(__lowerCamelCase )
else:
lowerCamelCase__ : Optional[int] =get_black_and_white_rgb(__lowerCamelCase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_lowercase : Optional[Any] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
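    # Sanity checks for get_distance: the origin never diverges (distance 1.0)
    # while a point far outside the set escapes on the very first step (0.0).
    assert get_distance(0, 0, 50) == 1
    assert get_distance(2, 2, 50) == 0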
| 625 | 1 |
"""
Fractional knapsack: take items in decreasing order of value density until the
capacity ``w`` is exhausted; the last item may be taken fractionally.
"""
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl: list, wt: list, w: int, n: int) -> float:
    """Return the maximum value achievable with capacity ``w`` over ``n`` items."""
    # sort items by value/weight density, highest first
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
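    # Worked example: densities are 6, 5 and 4, so capacity 50 takes the first
    # two items whole plus 20/30 of the third: 60 + 100 + 80 = 240.0.
    print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0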
| 185 |
import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist

from transformers import RagRetriever


logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """
    A distributed retriever built on top of ``torch.distributed``. The index is
    loaded only on the main worker, which gathers the queries from all workers,
    performs the retrieval and scatters the results back.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
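# `_main_retrieve` and `_chunk_tensor` are inherited from `RagRetriever`. A
# minimal sketch of what `_chunk_tensor` is assumed to do here - split the
# gathered result back into one `n_queries`-row chunk per worker:
#
#     def _chunk_tensor(self, t, chunk_size):
#         return [t[i : i + chunk_size] for i in range(0, len(t), chunk_size)]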
| 80 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a ViViT model.
    """

    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
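# Example usage (an illustrative sketch):
#
#     configuration = VivitConfig()   # defaults matching google/vivit-b-16x2-kinetics400
#     configuration.num_frames        # -> 32
#     configuration.tubelet_size      # -> [2, 16, 16]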
| 720 |
from __future__ import annotations

from collections import deque


class Automaton:
    """Aho-Corasick automaton for matching a set of keywords in a single pass."""

    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)

        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return a dict mapping each matched keyword to the start indices of its occurrences."""
        result: dict = {}
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
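    # Worked example: build the automaton once, then scan a haystack for all
    # keyword occurrences in a single pass.
    automaton = Automaton(["what", "hat", "ver", "er"])
    print(automaton.search_in("whatever, err ... , wherever"))
    # -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}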
| 479 | 0 |
"""Tests for `get_imports` from `transformers.dynamic_module_utils`."""
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
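# `get_imports` statically parses the file and returns the modules needed at
# import time; imports wrapped in try/except (the optional-dependency pattern)
# are deliberately skipped, so every case above resolves to just ["os"].
# A standalone sketch (hypothetical temp path):
#
#     with open("/tmp/sample.py", "w") as f:
#         f.write(TOP_LEVEL_TRY_IMPORT)
#     print(get_imports("/tmp/sample.py"))  # -> ["os"]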
| 138 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_=None , A_=None ):
if attention_mask is None:
lowerCAmelCase__ : Optional[int] = tf.cast(tf.math.not_equal(A_ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase__ = OPTConfig
lowercase__ = {}
lowercase__ = "gelu"
def __init__( self : str ,lowercase_ : List[Any] ,lowercase_ : Optional[int]=1_3 ,lowercase_ : str=7 ,lowercase_ : int=True ,lowercase_ : List[str]=False ,lowercase_ : Optional[int]=9_9 ,lowercase_ : Union[str, Any]=1_6 ,lowercase_ : Any=2 ,lowercase_ : Tuple=4 ,lowercase_ : Union[str, Any]=4 ,lowercase_ : int="gelu" ,lowercase_ : Tuple=0.1 ,lowercase_ : Optional[Any]=0.1 ,lowercase_ : List[str]=2_0 ,lowercase_ : Union[str, Any]=2 ,lowercase_ : Union[str, Any]=1 ,lowercase_ : Tuple=0 ,lowercase_ : List[Any]=1_6 ,lowercase_ : Union[str, Any]=1_6 ,):
lowerCAmelCase__ : Dict = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : Dict = seq_length
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : Optional[Any] = use_labels
lowerCAmelCase__ : Union[str, Any] = vocab_size
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : int = num_hidden_layers
lowerCAmelCase__ : Dict = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = intermediate_size
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : int = hidden_dropout_prob
lowerCAmelCase__ : Any = attention_probs_dropout_prob
lowerCAmelCase__ : int = max_position_embeddings
lowerCAmelCase__ : Any = eos_token_id
lowerCAmelCase__ : List[Any] = pad_token_id
lowerCAmelCase__ : Any = bos_token_id
lowerCAmelCase__ : Dict = embed_dim
lowerCAmelCase__ : int = word_embed_proj_dim
lowerCAmelCase__ : Union[str, Any] = False
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
lowerCAmelCase__ : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
lowerCAmelCase__ : int = tf.concat([input_ids, eos_tensor] ,axis=1 )
lowerCAmelCase__ : Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,embed_dim=self.embed_dim ,word_embed_proj_dim=self.word_embed_proj_dim ,is_encoder_decoder=lowercase_ ,**self.config_updates ,)
lowerCAmelCase__ : Optional[Any] = prepare_opt_inputs_dict(lowercase_ ,lowercase_ )
return config, inputs_dict
def __lowerCAmelCase ( self : List[Any] ,lowercase_ : int ,lowercase_ : List[Any] ):
lowerCAmelCase__ : Tuple = TFOPTModel(config=lowercase_ )
lowerCAmelCase__ : Tuple = inputs_dict['''input_ids''']
lowerCAmelCase__ : Optional[int] = input_ids[:1, :]
lowerCAmelCase__ : List[str] = inputs_dict['''attention_mask'''][:1, :]
lowerCAmelCase__ : Any = 1
# first forward pass
lowerCAmelCase__ : Dict = model(lowercase_ ,attention_mask=lowercase_ ,use_cache=lowercase_ )
lowerCAmelCase__ ,lowerCAmelCase__ : Any = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase__ : List[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowerCAmelCase__ : Tuple = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
# append to next input_ids and
lowerCAmelCase__ : Optional[Any] = tf.concat([input_ids, next_tokens] ,axis=-1 )
lowerCAmelCase__ : Tuple = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
lowerCAmelCase__ : Any = model(lowercase_ ,attention_mask=lowercase_ )[0]
lowerCAmelCase__ : Any = model(lowercase_ ,attention_mask=lowercase_ ,past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
lowerCAmelCase__ : str = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
lowerCAmelCase__ : List[str] = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase__ : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ ,lowercase_ ,rtol=1E-3 )
@require_tf
class SCREAMING_SNAKE_CASE ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
lowercase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowercase__ = (TFOPTForCausalLM,) if is_tf_available() else ()
lowercase__ = (
{"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = 10
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : List[Any] = TFOPTModelTester(self )
lowerCAmelCase__ : Optional[Any] = ConfigTester(self ,config_class=lowercase_ )
def __lowerCAmelCase ( self : str ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def __lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowercase_ : Optional[Any] ,lowercase_ : List[Any] ):
if hasattr(lowercase_ ,'''weight''' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowercase_ ,'''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
lowerCAmelCase__ : Optional[int] = model_class(config=lowercase_ )
lowerCAmelCase__ : List[Any] = _get_word_embedding_weight(lowercase_ ,model.get_input_embeddings() )
lowerCAmelCase__ : Optional[Any] = _get_word_embedding_weight(lowercase_ ,model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase_ )
lowerCAmelCase__ : Union[str, Any] = _get_word_embedding_weight(lowercase_ ,model.get_input_embeddings() )
lowerCAmelCase__ : List[Any] = _get_word_embedding_weight(lowercase_ ,model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCAmelCase__ : Optional[Any] = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] ,lowercase_ )
# check that weights remain the same after resizing
lowerCAmelCase__ : Optional[Any] = True
for pa, pa in zip(old_input_embeddings.value() ,new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase__ : str = False
self.assertTrue(lowercase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] ,lowercase_ )
lowerCAmelCase__ : Dict = True
for pa, pa in zip(old_output_embeddings.value() ,new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase__ : List[Any] = False
self.assertTrue(lowercase_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
return tf.constant(A_ , dtype=tf.intaa )
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = 99
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Dict = tf.ones((4, 1) ,dtype=tf.intaa ) * 2
lowerCAmelCase__ : Any = tf.concat([ids_tensor((4, 6) ,self.vocab_size - 3 ) + 3, eos_column_vector] ,axis=1 )
lowerCAmelCase__ : str = input_ids.shape[0]
lowerCAmelCase__ : str = OPTConfig(
vocab_size=self.vocab_size ,hidden_size=2_4 ,num_hidden_layers=2 ,num_attention_heads=2 ,ffn_dim=3_2 ,max_position_embeddings=4_8 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,)
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : Tuple = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
lowerCAmelCase__ : str = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowerCAmelCase__ : List[Any] = tf.not_equal(lowercase_ ,model.config.pad_token_id )
with tf.GradientTape():
lowerCAmelCase__ : List[str] = model(input_ids=lowercase_ ,attention_mask=lowercase_ ).last_hidden_state
lowerCAmelCase__ : Optional[int] = (1, 1_1, 5_1_2)
self.assertEqual(output.shape ,lowercase_ )
lowerCAmelCase__ : Any = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] ,lowercase_ ,atol=4E-3 ) )
lowerCAmelCase__ : Any = tf.function(lowercase_ ,jit_compile=lowercase_ )
lowerCAmelCase__ : Optional[Any] = xla_generate(lowercase_ ,lowercase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] ,lowercase_ ,atol=4E-2 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Optional[Any] ):
super().setUp()
lowerCAmelCase__ : Optional[int] = '''facebook/opt-350m'''
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Optional[int] = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCAmelCase__ : Union[str, Any] = GPTaTokenizer.from_pretrained(self.path_model )
lowerCAmelCase__ : int = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCAmelCase__ : Optional[Any] = tokenizer(lowercase_ ,return_tensors='''tf''' ,padding=lowercase_ ,add_special_tokens=lowercase_ )
lowerCAmelCase__ : int = tf.math.reduce_mean(model(inputs.input_ids ,attention_mask=inputs.attention_mask )[0] ,axis=-1 )
lowerCAmelCase__ : List[str] = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(lowercase_ ,lowercase_ ,atol=1E-4 ) )
lowerCAmelCase__ : int = tf.function(lowercase_ ,jit_compile=lowercase_ )
lowerCAmelCase__ : Tuple = tf.math.reduce_mean(xla_generate(inputs.input_ids ,attention_mask=inputs.attention_mask )[0] ,axis=-1 )
self.assertTrue(np.allclose(lowercase_ ,lowercase_ ,atol=1E-4 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self : List[str] ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Union[str, Any] = '''facebook/opt-125m'''
lowerCAmelCase__ : str = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : int = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase__ : int = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase__ : Union[str, Any] = tokenizer(lowercase_ ,return_tensors='''tf''' ).input_ids
lowerCAmelCase__ : Any = model.generate(lowercase_ ,max_length=1_0 )
lowerCAmelCase__ : Optional[int] = tokenizer.batch_decode(lowercase_ ,skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ ,lowercase_ )
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Union[str, Any] = '''facebook/opt-350m'''
lowerCAmelCase__ : Optional[Any] = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase__ : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase_ )
lowerCAmelCase__ : Optional[Any] = '''left'''
# use different length sentences to test batching
lowerCAmelCase__ : Dict = [
'''Hello, my dog is a little''',
'''Today, I''',
]
lowerCAmelCase__ : Union[str, Any] = tokenizer(lowercase_ ,return_tensors='''tf''' ,padding=lowercase_ )
lowerCAmelCase__ : Optional[Any] = inputs['''input_ids''']
lowerCAmelCase__ : str = model.generate(input_ids=lowercase_ ,attention_mask=inputs['''attention_mask'''] )
lowerCAmelCase__ : Optional[Any] = tokenizer(sentences[0] ,return_tensors='''tf''' ).input_ids
lowerCAmelCase__ : Optional[Any] = model.generate(input_ids=lowercase_ )
lowerCAmelCase__ : Optional[int] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] ,tf.intaa ) )
lowerCAmelCase__ : Any = tokenizer(sentences[1] ,return_tensors='''tf''' ).input_ids
lowerCAmelCase__ : Optional[int] = model.generate(input_ids=lowercase_ ,max_length=model.config.max_length - num_paddings )
lowerCAmelCase__ : Any = tokenizer.batch_decode(lowercase_ ,skip_special_tokens=lowercase_ )
lowerCAmelCase__ : List[Any] = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=lowercase_ )
lowerCAmelCase__ : Optional[Any] = tokenizer.decode(output_padded[0] ,skip_special_tokens=lowercase_ )
lowerCAmelCase__ : List[Any] = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(lowercase_ ,lowercase_ )
self.assertListEqual(lowercase_ ,[non_padded_sentence, padded_sentence] )
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : List[Any] = '''facebook/opt-350m'''
lowerCAmelCase__ : Tuple = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
lowerCAmelCase__ : int = []
lowerCAmelCase__ : int = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase__ : List[str] = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase__ : Optional[Any] = tokenizer(lowercase_ ,return_tensors='''tf''' ).input_ids
lowerCAmelCase__ : Optional[Any] = model.generate(lowercase_ ,max_length=1_0 )
lowerCAmelCase__ : str = tokenizer.batch_decode(lowercase_ ,skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ ,lowercase_ )
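# The cache consistency check in `check_decoder_model_past_large_inputs` above
# relies on the invariant that feeding only the new tokens together with
# `past_key_values` must reproduce the tail of a full forward pass. A minimal
# sketch of that pattern (illustrative, not part of the test suite):
#
#     out = model(input_ids, use_cache=True)
#     full = model(tf.concat([input_ids, next_tokens], axis=-1))[0]
#     incremental = model(next_tokens, past_key_values=out.past_key_values)[0]
#     # incremental ~= full[:, -next_tokens.shape[1]:, :]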
| 450 | 0 |
"""Check that the model part of the documentation table of content is sorted and duplicate-free."""
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """
    Cleans the table of content of the model documentation by removing duplicates and sorting models
    alphabetically.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
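# Worked example for clean_model_doc_toc (a sketch with a toy section):
#
#     docs = [
#         {"local": "bert", "title": "BERT"},
#         {"local": "albert", "title": "ALBERT"},
#         {"local": "bert", "title": "BERT"},
#     ]
#     clean_model_doc_toc(docs)
#     # -> [{"local": "albert", "title": "ALBERT"}, {"local": "bert", "title": "BERT"}]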
| 706 |
def solution(max_perimeter: int = 10**9) -> int:
    """
    Return the sum of the perimeters of all almost-equilateral triangles with
    integral side lengths and area whose perimeters do not exceed
    ``max_perimeter``.
    """
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
| 381 | 0 |
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()

        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()

        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
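# Note on shapes: a Bark voice preset bundles a 1-D `semantic_prompt` plus 2-D
# `coarse_prompt` (2 codebooks) and `fine_prompt` (8 codebooks) arrays, which
# is why the test builds them as (seq_len,), (2, seq_len) and (8, seq_len).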
| 33 |
from copy import deepcopy


class FenwickTree:
    """Fenwick tree (binary indexed tree): O(log n) prefix sums and point updates."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        """Initialize the tree from ``arr`` in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the underlying array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add ``value`` to the element at ``index`` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the element at ``index`` to ``value`` in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Sum of the elements in [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum of the elements in [left, right) in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        """Value of the element at ``index`` in O(log n)."""
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index with prefix sum not exceeding ``value``, or -1; O(log n)."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
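    # Worked example: prefix/range sums and point updates in O(log n).
    f = FenwickTree([1, 2, 3, 4, 5])
    print(f.prefix(3))    # 1 + 2 + 3 = 6
    print(f.query(1, 4))  # 2 + 3 + 4 = 9
    f.add(2, 10)
    print(f.get(2))       # 3 + 10 = 13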
| 33 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: str = logging.get_logger(__name__)
A: Optional[Any] = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : List[Any] = 'mra'
def __init__( self , _SCREAMING_SNAKE_CASE=50265 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE="full" , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = vocab_size
UpperCAmelCase : List[str] = max_position_embeddings
UpperCAmelCase : Optional[Any] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : List[Any] = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : List[str] = layer_norm_eps
UpperCAmelCase : List[str] = position_embedding_type
UpperCAmelCase : List[Any] = block_per_row
UpperCAmelCase : int = approx_mode
UpperCAmelCase : Optional[Any] = initial_prior_first_n_blocks
UpperCAmelCase : Optional[Any] = initial_prior_diagonal_n_blocks
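# Example usage (an illustrative sketch):
#
#     configuration = MraConfig()   # defaults matching uw-madison/mra-base-512-4
#     configuration.approx_mode     # -> "full"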
| 713 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
A: List[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : Optional[int] = 'AutoTokenizer'
__lowerCAmelCase : str = ['tokenizer']
__lowerCAmelCase : Any = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int:
'''simple docstring'''
super().__init__(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = speaker_embeddings
@classmethod
def SCREAMING_SNAKE_CASE ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
UpperCAmelCase : Any = get_file_from_repo(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , subfolder=kwargs.pop("""subfolder""" , _SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop("""cache_dir""" , _SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop("""force_download""" , _SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop("""proxies""" , _SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop("""resume_download""" , _SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop("""local_files_only""" , _SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop("""use_auth_token""" , _SCREAMING_SNAKE_CASE ) , revision=kwargs.pop("""revision""" , _SCREAMING_SNAKE_CASE ) , )
if speaker_embeddings_path is None:
logger.warning(
F"`{os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
UpperCAmelCase : Optional[int] = None
else:
with open(_SCREAMING_SNAKE_CASE ) as speaker_embeddings_json:
UpperCAmelCase : List[str] = json.load(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : List[str] = None
UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return cls(tokenizer=_SCREAMING_SNAKE_CASE , speaker_embeddings=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , _SCREAMING_SNAKE_CASE="speaker_embeddings" , _SCREAMING_SNAKE_CASE = False , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """v2""" ) , exist_ok=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = {}
UpperCAmelCase : Union[str, Any] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
UpperCAmelCase : Optional[Any] = self._load_voice_preset(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , _SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}.npy" )
UpperCAmelCase : Tuple = tmp_dict
with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """w""" ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
super().save_pretrained(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.speaker_embeddings[voice_preset]
UpperCAmelCase : List[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
UpperCAmelCase : List[str] = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , _SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop("""cache_dir""" , _SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop("""force_download""" , _SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop("""proxies""" , _SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop("""resume_download""" , _SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop("""local_files_only""" , _SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop("""use_auth_token""" , _SCREAMING_SNAKE_CASE ) , revision=kwargs.pop("""revision""" , _SCREAMING_SNAKE_CASE ) , )
if path is None:
raise ValueError(
F"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
UpperCAmelCase : List[str] = np.load(_SCREAMING_SNAKE_CASE )
return voice_preset_dict
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE = None ) -> List[str]:
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="pt" , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Optional[int]:
'''simple docstring'''
if voice_preset is not None and not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
UpperCAmelCase : Dict = self._load_voice_preset(_SCREAMING_SNAKE_CASE )
else:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and not voice_preset.endswith(""".npz""" ):
UpperCAmelCase : Tuple = voice_preset + """.npz"""
UpperCAmelCase : Union[str, Any] = np.load(_SCREAMING_SNAKE_CASE )
if voice_preset is not None:
self._validate_voice_preset_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = self.tokenizer(
_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
if voice_preset is not None:
UpperCAmelCase : List[Any] = voice_preset
return encoded_text
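# Example usage (a sketch; the checkpoint and preset names are illustrative
# and require Hub access):
#
#     processor = BarkProcessor.from_pretrained("suno/bark-small")
#     inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#     # inputs contains input_ids, attention_mask and a "history_prompt" BatchFeature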
| 359 | 0 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowerCamelCase = None
__lowerCamelCase = """<""" if sys.byteorder == """little""" else """>"""
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowerCamelCase = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class _snake_case :
"""simple docstring"""
lowerCamelCase_ = True
lowerCamelCase_ = None
# Automatically constructed
lowerCamelCase_ = "PIL.Image.Image"
lowerCamelCase_ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
lowerCamelCase_ = field(default='''Image''' ,init=a_ ,repr=a_ )
def __call__( self ) -> Tuple:
"""simple docstring"""
return self.pa_type
def lowercase_ ( self , a ) -> dict:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(a , a ):
_A = np.array(a )
if isinstance(a , a ):
return {"path": value, "bytes": None}
elif isinstance(a , a ):
return {"path": None, "bytes": value}
elif isinstance(a , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a )
elif isinstance(a , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def lowercase_ ( self , a , a=None ) -> "PIL.Image.Image":
"""simple docstring"""
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
_A = {}
_A = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(a ):
_A = PIL.Image.open(a )
else:
_A = path.split('''::''' )[-1]
try:
_A = string_to_dict(a , config.HUB_DATASETS_URL )["""repo_id"""]
_A = token_per_repo_id.get(a )
except ValueError:
_A = None
with xopen(a , '''rb''' , use_auth_token=a ) as f:
                        bytes_ = BytesIO(f.read() )
_A = PIL.Image.open(bytes_ )
else:
_A = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def lowercase_ ( self , a ) -> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type ):
_A = pa.array([None] * len(a ) , type=pa.binary() )
_A = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_A = pa.array([None] * len(a ) , type=pa.string() )
_A = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
_A = storage.field('''bytes''' )
else:
_A = pa.array([None] * len(a ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
_A = storage.field('''path''' )
else:
_A = pa.array([None] * len(a ) , type=pa.string() )
_A = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
_A = pa.array(
[encode_np_array(np.array(a ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
_A = pa.array([None] * len(a ) , type=pa.string() )
_A = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(a , self.pa_type )
def lowercase_ ( self , a ) -> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a ):
with xopen(a , '''rb''' ) as f:
_A = f.read()
return bytes_
_A = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_A = pa.array(
[os.path.basename(a ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
_A = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(a , self.pa_type )
def UpperCAmelCase__ ( ) -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
_A = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def UpperCAmelCase__ ( __snake_case ) -> bytes:
_A = BytesIO()
if image.format in list_image_compression_formats():
_A = image.format
else:
_A = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(__A , format=__A )
return buffer.getvalue()
def UpperCAmelCase__ ( __snake_case ) -> dict:
if hasattr(__A , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__A )}
def UpperCAmelCase__ ( __snake_case ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
_A = array.dtype
_A = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
_A = dtype.kind
_A = dtype.itemsize
_A = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
_A = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
_A = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
_A = dtype_byteorder + dtype_kind + str(__A )
_A = np.dtype(__A )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
_A = PIL.Image.fromarray(array.astype(__A ) )
return {"path": None, "bytes": image_to_bytes(__A )}
def UpperCAmelCase__ ( __snake_case ) -> List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
_A = first_non_null_value(__A )
if isinstance(__A , __A ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__A , np.ndarray ):
_A = no_op_if_value_is_null(__A )
return [obj_to_image_dict_func(__A ) for obj in objs]
elif isinstance(__A , PIL.Image.Image ):
_A = no_op_if_value_is_null(__A )
return [obj_to_image_dict_func(__A ) for obj in objs]
else:
return objs
else:
return objs
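# Minimal standalone sketch of the "downcast within the dtype kind" loop used by
# encode_np_array above: halve the itemsize until the dtype is one Pillow can
# round-trip. The small `safe` set below is an illustrative stand-in for the
# full _VALID_IMAGE_ARRAY_DTPYES list; nothing here is called by the module.
def _downcast_for_pillow_demo(dtype: "np.dtype") -> "np.dtype":
    safe = {np.dtype("|u1"), np.dtype("<i4"), np.dtype("<f4")}  # abbreviated
    itemsize = dtype.itemsize
    while itemsize >= 1:
        candidate = np.dtype(dtype.kind + str(itemsize))
        if candidate in safe:
            return candidate
        itemsize //= 2
    raise TypeError(f"no Pillow-compatible dtype for {dtype}")
# e.g. _downcast_for_pillow_demo(np.dtype(np.int64)) == np.dtype("int32")
# (on little-endian machines, where "<i4" is the native int32)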
| 317 |
def snake_case_ (__A : list[int] , __A : list[int] ) -> None:
__lowerCAmelCase : Union[str, Any] = len(__A )
print("""The following activities are selected:""" )
# The first activity is always selected
__lowerCAmelCase : str = 0
print(__A , end=""",""" )
# Consider rest of the activities
for j in range(__A ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(__A , end=""",""" )
__lowerCAmelCase : Tuple = j
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = [1, 3, 0, 5, 8, 5]
__UpperCAmelCase = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
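# A non-printing sketch of the same greedy rule, returning the selected indices
# so the result is testable. Assumes activities are already sorted by finish
# time, as in the sample data above.
def _select_activities(start_times, finish_times):
    selected = [0]  # the first activity is always selected
    last = 0
    for j in range(1, len(start_times)):
        # pick the next activity whose start is at or after the last finish
        if start_times[j] >= finish_times[last]:
            selected.append(j)
            last = j
    return selected

assert _select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]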
| 651 | 0 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class snake_case_ :
"""simple docstring"""
pass
| 455 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case_ (lowercase__ ):
"""simple docstring"""
_lowerCamelCase = """ClapFeatureExtractor"""
_lowerCamelCase = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self ,lowercase ,lowercase):
"""simple docstring"""
super().__init__(lowercase ,lowercase)
def __call__( self ,lowercase=None ,lowercase=None ,lowercase=None ,**lowercase):
"""simple docstring"""
UpperCAmelCase_ : Dict = kwargs.pop("sampling_rate" ,lowercase)
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none.")
if text is not None:
UpperCAmelCase_ : List[str] = self.tokenizer(lowercase ,return_tensors=lowercase ,**lowercase)
if audios is not None:
UpperCAmelCase_ : str = self.feature_extractor(
lowercase ,sampling_rate=lowercase ,return_tensors=lowercase ,**lowercase)
if text is not None and audios is not None:
UpperCAmelCase_ : Optional[int] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase) ,tensor_type=lowercase)
def A_ ( self ,*lowercase ,**lowercase):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowercase ,**lowercase)
def A_ ( self ,*lowercase ,**lowercase):
"""simple docstring"""
return self.tokenizer.decode(*lowercase ,**lowercase)
@property
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : str = self.tokenizer.model_input_names
UpperCAmelCase_ : str = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
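# Usage sketch. The processor class above is obfuscated to `snake_case_`, so the
# readable name, checkpoint id and sampling rate below are illustrative
# assumptions rather than values taken from this file:
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=waveform,
#                      sampling_rate=48_000, return_tensors="pt")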
| 455 | 1 |
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def UpperCamelCase ( _lowerCAmelCase : str, _lowerCAmelCase : Any=False ) -> Tuple:
try:
_UpperCAmelCase : List[str] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase : Tuple = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase : Any = strtobool(_lowerCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
lowerCamelCase__ : Tuple = parse_flag_from_env('''RUN_SLOW''', default=False)
def UpperCamelCase ( _lowerCAmelCase : str ) -> str:
return unittest.skip("""Test was skipped""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : str ) -> Any:
return unittest.skipUnless(_run_slow_tests, """test is slow""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : List[str] ) -> Any:
return unittest.skipUnless(not torch.cuda.is_available(), """test requires only a CPU""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : Union[str, Any] ) -> int:
return unittest.skipUnless(torch.cuda.is_available(), """test requires a GPU""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : Optional[int] ) -> Optional[int]:
return unittest.skipUnless(is_xpu_available(), """test requires a XPU""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : int ) -> Union[str, Any]:
return unittest.skipUnless(is_mps_available(), """test requires a `mps` backend support in `torch`""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : List[Any] ) -> Dict:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available(), """test requires the Hugging Face suite""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : int ) -> str:
return unittest.skipUnless(is_bnb_available(), """test requires the bitsandbytes library""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : Dict ) -> Dict:
return unittest.skipUnless(is_tpu_available(), """test requires TPU""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : List[str] ) -> Optional[Any]:
return unittest.skipUnless(torch.cuda.device_count() == 1, """test requires a GPU""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : Optional[Any] ) -> Any:
return unittest.skipUnless(torch.xpu.device_count() == 1, """test requires a XPU""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : Dict ) -> int:
return unittest.skipUnless(torch.cuda.device_count() > 1, """test requires multiple GPUs""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
return unittest.skipUnless(torch.xpu.device_count() > 1, """test requires multiple XPUs""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : Optional[Any] ) -> int:
return unittest.skipUnless(is_safetensors_available(), """test requires safetensors""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : Tuple ) -> str:
return unittest.skipUnless(is_deepspeed_available(), """test requires DeepSpeed""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : Optional[int] ) -> Optional[int]:
return unittest.skipUnless(is_torch_version(""">=""", """1.12.0""" ), """test requires torch version >= 1.12.0""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : str=None, _lowerCAmelCase : Optional[Any]=None ) -> Tuple:
if test_case is None:
return partial(_lowerCAmelCase, version=_lowerCAmelCase )
return unittest.skipUnless(is_torch_version(""">=""", _lowerCAmelCase ), f'''test requires torch version >= {version}''' )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : int ) -> List[str]:
return unittest.skipUnless(is_tensorboard_available(), """test requires Tensorboard""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : str ) -> List[str]:
return unittest.skipUnless(is_wandb_available(), """test requires wandb""" )(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : List[str] ) -> str:
return unittest.skipUnless(is_comet_ml_available(), """test requires comet_ml""" )(_lowerCAmelCase )
lowerCamelCase__ : Union[str, Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def UpperCamelCase ( _lowerCAmelCase : Tuple ) -> str:
return unittest.skipUnless(
_atleast_one_tracker_available, """test requires at least one tracker to be available and for `comet_ml` to not be installed""", )(_lowerCAmelCase )
class _UpperCAmelCase ( unittest.TestCase):
__a : Optional[Any] = True
@classmethod
def __snake_case ( cls ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : List[str] = tempfile.mkdtemp()
@classmethod
def __snake_case ( cls ) -> Optional[Any]:
'''simple docstring'''
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_A )
class _UpperCAmelCase ( unittest.TestCase):
def __snake_case ( self ) -> str:
'''simple docstring'''
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class _UpperCAmelCase ( unittest.TestCase):
def __snake_case ( self , _A ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = mocks if isinstance(_A , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def UpperCamelCase ( _lowerCAmelCase : Union[str, Any] ) -> Tuple:
_UpperCAmelCase : Optional[Any] = AcceleratorState()
_UpperCAmelCase : Optional[Any] = tensor[None].clone().to(state.device )
_UpperCAmelCase : str = gather(_lowerCAmelCase ).cpu()
_UpperCAmelCase : Optional[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i], _lowerCAmelCase ):
return False
return True
class _UpperCAmelCase :
def __init__( self , _A , _A , _A ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple = returncode
_UpperCAmelCase : int = stdout
_UpperCAmelCase : Union[str, Any] = stderr
async def UpperCamelCase ( _lowerCAmelCase : List[str], _lowerCAmelCase : Any ) -> Tuple:
while True:
_UpperCAmelCase : Union[str, Any] = await stream.readline()
if line:
callback(_lowerCAmelCase )
else:
break
async def UpperCamelCase ( _lowerCAmelCase : int, _lowerCAmelCase : Optional[int]=None, _lowerCAmelCase : List[str]=None, _lowerCAmelCase : str=None, _lowerCAmelCase : int=False, _lowerCAmelCase : int=False ) -> _RunOutput:
if echo:
print("""\nRunning: """, """ """.join(_lowerCAmelCase ) )
_UpperCAmelCase : Union[str, Any] = await asyncio.create_subprocess_exec(
cmd[0], *cmd[1:], stdin=_lowerCAmelCase, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=_lowerCAmelCase, )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : Union[str, Any] = []
def tee(_lowerCAmelCase : Tuple, _lowerCAmelCase : Optional[Any], _lowerCAmelCase : List[str], _lowerCAmelCase : Any="" ):
_UpperCAmelCase : Dict = line.decode("""utf-8""" ).rstrip()
sink.append(_lowerCAmelCase )
if not quiet:
print(_lowerCAmelCase, _lowerCAmelCase, file=_lowerCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout, lambda _lowerCAmelCase : tee(_lowerCAmelCase, _lowerCAmelCase, sys.stdout, label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr, lambda _lowerCAmelCase : tee(_lowerCAmelCase, _lowerCAmelCase, sys.stderr, label="""stderr:""" ) ) ),
], timeout=_lowerCAmelCase, )
return _RunOutput(await p.wait(), _lowerCAmelCase, _lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : Optional[int], _lowerCAmelCase : Tuple=None, _lowerCAmelCase : Dict=None, _lowerCAmelCase : Optional[int]=180, _lowerCAmelCase : Optional[Any]=False, _lowerCAmelCase : Optional[int]=True ) -> _RunOutput:
_UpperCAmelCase : Dict = asyncio.get_event_loop()
_UpperCAmelCase : Union[str, Any] = loop.run_until_complete(
_stream_subprocess(_lowerCAmelCase, env=_lowerCAmelCase, stdin=_lowerCAmelCase, timeout=_lowerCAmelCase, quiet=_lowerCAmelCase, echo=_lowerCAmelCase ) )
_UpperCAmelCase : str = """ """.join(_lowerCAmelCase )
if result.returncode > 0:
_UpperCAmelCase : Union[str, Any] = """\n""".join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
return result
class _UpperCAmelCase ( __a):
pass
def UpperCamelCase ( _lowerCAmelCase : List[str], _lowerCAmelCase : Optional[Any]=False ) -> List[Any]:
try:
_UpperCAmelCase : int = subprocess.check_output(_lowerCAmelCase, stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(_lowerCAmelCase, """decode""" ):
_UpperCAmelCase : Dict = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'''Command `{" ".join(_lowerCAmelCase )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
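# Usage sketch: how these helpers are typically combined in a test suite. The
# decorator and class names below assume the original, pre-obfuscation names
# (the defs above were renamed), so treat this as illustrative only:
#
#   class BigModelTest(TempDirTestCase):
#       @require_cuda
#       @slow
#       def test_training_step(self):
#           run_command(["python", "train.py", "--output_dir", self.tmpdir])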
| 238 |
"""simple docstring"""
import argparse
import datetime
def UpperCamelCase ( _lowerCAmelCase : str ) -> str:
_UpperCAmelCase : List[str] = {
"""0""": """Sunday""",
"""1""": """Monday""",
"""2""": """Tuesday""",
"""3""": """Wednesday""",
"""4""": """Thursday""",
"""5""": """Friday""",
"""6""": """Saturday""",
}
_UpperCAmelCase : List[str] = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(_lowerCAmelCase ) < 11:
raise ValueError("""Must be 10 characters long""" )
# Get month
_UpperCAmelCase : int = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError("""Month must be between 1 - 12""" )
_UpperCAmelCase : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get day
_UpperCAmelCase : int = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError("""Date must be between 1 - 31""" )
# Get second separator
_UpperCAmelCase : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get year
_UpperCAmelCase : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
"""Year out of range. There has to be some sort of limit...right?""" )
# Get datetime obj for validation
_UpperCAmelCase : List[str] = datetime.date(int(_lowerCAmelCase ), int(_lowerCAmelCase ), int(_lowerCAmelCase ) )
# Start math
if m <= 2:
_UpperCAmelCase : int = y - 1
_UpperCAmelCase : List[str] = m + 12
# maths var
_UpperCAmelCase : int = int(str(_lowerCAmelCase )[:2] )
_UpperCAmelCase : int = int(str(_lowerCAmelCase )[2:] )
_UpperCAmelCase : int = int(2.6 * m - 5.39 )
_UpperCAmelCase : int = int(c / 4 )
_UpperCAmelCase : int = int(k / 4 )
_UpperCAmelCase : int = int(d + k )
_UpperCAmelCase : int = int(t + u + v + x )
_UpperCAmelCase : int = int(z - (2 * c) )
_UpperCAmelCase : int = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
# Response
_UpperCAmelCase : str = f'''Your date {date_input}, is a {days[str(_lowerCAmelCase )]}!'''
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ : str = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
lowerCamelCase__ : int = parser.parse_args()
zeller(args.date_input)
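# Cross-check sketch: the same weekday via the standard library, which the
# script above already uses internally for validation. Assumes the mm-dd-yyyy
# (or mm/dd/yyyy) input convention used throughout; not part of the CLI.
def _weekday_stdlib(date_input: str) -> str:
    m, d, y = int(date_input[0:2]), int(date_input[3:5]), int(date_input[6:10])
    return datetime.date(y, m, d).strftime("%A")

assert _weekday_stdlib("01-01-2024") == "Monday"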
| 238 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE_ : Any = TypeVar('T')
class a ( Generic[T] ):
"""simple docstring"""
def __init__( self: List[str] , UpperCamelCase: T ):
"""simple docstring"""
A__ = data
A__ = self
A__ = 0
class a ( Generic[T] ):
"""simple docstring"""
def __init__( self: Optional[int] ):
"""simple docstring"""
A__ = {}
def UpperCamelCase ( self: Dict , UpperCamelCase: T ):
"""simple docstring"""
A__ = DisjointSetTreeNode(UpperCamelCase )
def UpperCamelCase ( self: int , UpperCamelCase: T ):
"""simple docstring"""
A__ = self.map[data]
if elem_ref != elem_ref.parent:
A__ = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: DisjointSetTreeNode[T] , UpperCamelCase: DisjointSetTreeNode[T] ):
"""simple docstring"""
if nodea.rank > nodea.rank:
A__ = nodea
else:
A__ = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def UpperCamelCase ( self: str , UpperCamelCase: T , UpperCamelCase: T ):
"""simple docstring"""
self.link(self.find_set(UpperCamelCase ) , self.find_set(UpperCamelCase ) )
class a ( Generic[T] ):
"""simple docstring"""
def __init__( self: Any ):
"""simple docstring"""
A__ = {}
def UpperCamelCase ( self: Tuple , UpperCamelCase: T ):
"""simple docstring"""
if node not in self.connections:
A__ = {}
def UpperCamelCase ( self: List[str] , UpperCamelCase: T , UpperCamelCase: T , UpperCamelCase: int ):
"""simple docstring"""
self.add_node(UpperCamelCase )
self.add_node(UpperCamelCase )
A__ = weight
A__ = weight
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = []
A__ = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda UpperCamelCase : UpperCamelCase[2] )
# creating the disjoint set
A__ = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(UpperCamelCase )
# MST generation
A__ = 0
A__ = 0
A__ = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
A__ , A__ , A__ = edges[index]
index += 1
A__ = disjoint_set.find_set(UpperCamelCase )
A__ = disjoint_set.find_set(UpperCamelCase )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(UpperCamelCase , UpperCamelCase , UpperCamelCase )
disjoint_set.union(UpperCamelCase , UpperCamelCase )
return graph
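# A self-contained miniature of the same Kruskal idea, using a plain union-find
# with path halving (the rank bookkeeping of the classes above is omitted for
# brevity; this is a demo, not their API).
def _kruskal_total_weight(n: int, edges: list) -> int:
    parent = list(range(n))

    def find(x: int) -> int:
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    total = 0
    for u, v, w in sorted(edges, key=lambda e: e[2]):  # lightest edges first
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # the edge joins two components: keep it
            parent[root_u] = root_v
            total += w
    return total

assert _kruskal_total_weight(3, [(0, 1, 1), (1, 2, 2), (0, 2, 10)]) == 3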
| 500 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : Any = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 500 | 1 |
from datetime import datetime as dt
import os
from github import Github
A : Optional[Any] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def UpperCamelCase__ ( ) -> Dict:
_lowercase = Github(os.environ["""GITHUB_TOKEN"""] )
_lowercase = g.get_repo("""huggingface/transformers""" )
_lowercase = repo.get_issues(state="""open""" )
for issue in open_issues:
_lowercase = sorted([comment for comment in issue.get_comments()] , key=lambda SCREAMING_SNAKE_CASE_ : i.created_at , reverse=snake_case__ )
_lowercase = comments[0] if len(snake_case__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
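# A dry-run switch is a common safeguard for scripts that close issues in bulk;
# a minimal sketch, echoing the commented-out print statements above (the env
# var name below is an assumption, not part of this script):
#
#   DRY_RUN = os.environ.get("STALE_BOT_DRY_RUN") == "1"
#   if DRY_RUN:
#       print(f"Would close issue {issue.number}")
#   else:
#       issue.edit(state="closed")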
| 287 |
"""simple docstring"""
from maths.prime_check import is_prime
def _snake_case ( snake_case__ : int ):
if not isinstance(snake_case__ , snake_case__ ):
A = F'Input value of [number={number}] must be an integer'
raise TypeError(snake_case__ )
if is_prime(snake_case__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
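# Self-contained checks of the twin-prime rule above, using a naive
# trial-division test so the demo does not depend on the external
# `maths.prime_check` module.
def _is_prime_demo(n: int) -> bool:
    if n < 2:
        return False
    return all(n % k for k in range(2, int(n**0.5) + 1))

# 5 and 7 are twin primes, so the helper above returns 7 for an input of 5;
# 4 is not prime, so the same call on 4 returns -1.
assert _is_prime_demo(5) and _is_prime_demo(7)
assert not _is_prime_demo(9)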
| 91 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''new-model'''
if is_tf_available():
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = NewModelConfig
@require_tf
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self : int ) ->int:
UpperCAmelCase_ = '''bert-base-cased'''
UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = TFAutoModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def lowerCAmelCase__ ( self : int ) ->Any:
UpperCAmelCase_ = '''bert-base-cased'''
UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = TFAutoModelForPreTraining.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def lowerCAmelCase__ ( self : Tuple ) ->str:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def lowerCAmelCase__ ( self : Tuple ) ->str:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[str]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def lowerCAmelCase__ ( self : Tuple ) ->int:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def lowerCAmelCase__ ( self : List[str] ) ->Any:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = TFAutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def lowerCAmelCase__ ( self : List[str] ) ->Any:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
@require_tensorflow_probability
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(UpperCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : int ) ->Optional[int]:
UpperCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase__ ) , 1_4410 )
def lowerCAmelCase__ ( self : int ) ->int:
UpperCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase__ ) , 1_4410 )
def lowerCAmelCase__ ( self : List[str] ) ->List[Any]:
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
UpperCAmelCase_ = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = copy.deepcopy(model.config )
UpperCAmelCase_ = ['''FunnelBaseModel''']
UpperCAmelCase_ = TFAutoModel.from_config(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCAmelCase__ )
UpperCAmelCase_ = TFAutoModel.from_pretrained(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : str ) ->Optional[int]:
try:
AutoConfig.register('''new-model''' , UpperCAmelCase__ )
UpperCAmelCase_ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(UpperCAmelCase__ ):
auto_class.register(UpperCAmelCase__ , UpperCAmelCase__ )
auto_class.register(UpperCAmelCase__ , UpperCAmelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase__ ):
auto_class.register(UpperCAmelCase__ , UpperCAmelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase_ = BertModelTester(self ).get_config()
UpperCAmelCase_ = NewModelConfig(**tiny_config.to_dict() )
UpperCAmelCase_ = auto_class.from_config(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCAmelCase__ )
UpperCAmelCase_ = auto_class.from_pretrained(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
with self.assertRaisesRegex(
UpperCAmelCase__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase_ = TFAutoModel.from_pretrained('''bert-base''' )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
with self.assertRaisesRegex(
UpperCAmelCase__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase_ = TFAutoModel.from_pretrained(UpperCAmelCase__ , revision='''aaaaaa''' )
def lowerCAmelCase__ ( self : Any ) ->Union[str, Any]:
with self.assertRaisesRegex(
UpperCAmelCase__ , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
UpperCAmelCase_ = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
with self.assertRaisesRegex(UpperCAmelCase__ , '''Use `from_pt=True` to load this model''' ):
UpperCAmelCase_ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
def lowerCAmelCase__ ( self : List[str] ) ->Dict:
# Make sure we have cached the model.
UpperCAmelCase_ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase_ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
UpperCAmelCase_ = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
with RequestCounter() as counter:
UpperCAmelCase_ = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 708 |
'''simple docstring'''
import re
def __lowerCamelCase ( _UpperCamelCase : str ):
'''simple docstring'''
    return [char.split() for char in re.split(R'''[^ a-z A-Z 0-9 \s]''' , _UpperCamelCase )]
def __lowerCamelCase ( _UpperCamelCase : str ):
'''simple docstring'''
    UpperCAmelCase_ = split_input(_UpperCamelCase )
return "".join(
[''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool , _UpperCamelCase : str ):
'''simple docstring'''
try:
UpperCAmelCase_ = split_input(_UpperCamelCase )
if upper:
UpperCAmelCase_ = ''''''.join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
UpperCAmelCase_ = ''''''.join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def __lowerCamelCase ( _UpperCamelCase : str ):
'''simple docstring'''
return to_simple_case(_UpperCamelCase )
def __lowerCamelCase ( _UpperCamelCase : str ):
'''simple docstring'''
try:
UpperCAmelCase_ = to_simple_case(_UpperCamelCase )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool ):
'''simple docstring'''
return to_complex_case(_UpperCamelCase , _UpperCamelCase , '''_''' )
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool ):
'''simple docstring'''
return to_complex_case(_UpperCamelCase , _UpperCamelCase , '''-''' )
if __name__ == "__main__":
__import__("doctest").testmod()
| 43 | 0 |
def a (lowerCAmelCase__ ):
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
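# The bit trick works because a power of two has exactly one set bit, so
# `n & (n - 1)` clears it and yields zero. As written, the function also
# reports 0 as a power of two (0 & -1 == 0), which callers may want to treat
# as an edge case.
#
#   16 & 15 == 0    # zero    -> power of two
#   18 & 17 == 16   # nonzero -> not a power of two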
| 99 |
"""simple docstring"""
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 624 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase : Tuple =logging.getLogger(__name__)
def A__ ( __A , __A ):
'''simple docstring'''
return (preds == labels).mean()
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_snake_case = field(
default=__lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_snake_case = field(
default=__lowerCamelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_snake_case = field(
default=__lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __snake_case :
'''simple docstring'''
_snake_case = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
_snake_case = field(metadata={'help': 'Should contain the data files for the task.'} )
_snake_case = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_snake_case = field(
default=__lowerCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , A__ )
# Set seed
set_seed(training_args.seed )
try:
_lowerCamelCase : Union[str, Any] = processors[data_args.task_name]()
_lowerCamelCase : Optional[Any] = processor.get_labels()
_lowerCamelCase : str = len(A__ )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
_lowerCamelCase : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , )
# Get datasets
_lowerCamelCase : Union[str, Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=A__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_lowerCamelCase : Union[str, Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=A__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__A ) -> Dict:
_lowerCamelCase : Union[str, Any] = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(A__ , p.label_ids )}
# Data collator
_lowerCamelCase : str = DataCollatorWithPadding(A__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_lowerCamelCase : List[Any] = Trainer(
model=A__ , args=A__ , train_dataset=A__ , eval_dataset=A__ , compute_metrics=A__ , data_collator=A__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_lowerCamelCase : Dict = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowerCamelCase : Any = trainer.evaluate()
_lowerCamelCase : str = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(A__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , A__ , A__ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(A__ )
return results
def A__ ( __A ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
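# CLI usage sketch: paths below are placeholders, and "swag" assumes the SWAG
# processor is among those registered in utils_multiple_choice:
#
#   python run_multiple_choice.py \
#     --task_name swag \
#     --model_name_or_path bert-base-uncased \
#     --data_dir ./data/swag \
#     --output_dir ./out \
#     --max_seq_length 128 \
#     --do_train --do_eval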
| 713 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Tuple = {}
_lowerCamelCase : List[Any] = tokenizer(example["""content"""] , truncation=__A )["""input_ids"""]
_lowerCamelCase : Tuple = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
lowerCAmelCase : int =HfArgumentParser(PretokenizationArguments)
lowerCAmelCase : int =parser.parse_args()
if args.num_workers is None:
lowerCAmelCase : Any =multiprocessing.cpu_count()
lowerCAmelCase : Optional[Any] =AutoTokenizer.from_pretrained(args.tokenizer_dir)
lowerCAmelCase : str =time.time()
lowerCAmelCase : Union[str, Any] =load_dataset(args.dataset_name, split="train")
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")
lowerCAmelCase : Dict =time.time()
lowerCAmelCase : Dict =ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
lowerCAmelCase : Tuple =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 15 | 0 |
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class A__ ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] ) -> List[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
@torch.no_grad()
def __call__( self : str , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : int = 5_0 , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : Optional[Any] , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=lowerCAmelCase__ , )
_UpperCAmelCase : Dict = image.to(self.device )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_UpperCAmelCase : Dict = self.unet(lowerCAmelCase__ , lowerCAmelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase : Dict = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
_UpperCAmelCase : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase : Any = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowerCAmelCase__ )
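# Usage sketch. The pipeline class above is obfuscated to `A__` and its
# parameters are renamed, so the keyword names and checkpoint id below assume
# the original DDPM/DDIM-style signature and are illustrative only:
#
#   pipe = A__.from_pretrained("google/ddpm-cat-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("sample.png")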
| 494 |
'''simple docstring'''
def __UpperCAmelCase ( a_: int ):
if not isinstance(a_, a_ ):
_UpperCAmelCase : List[str] = f"""Input value of [number={number}] must be an integer"""
raise TypeError(a_ )
if number < 0:
return False
_UpperCAmelCase : Union[str, Any] = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
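# The digit-by-digit comparison above tests whether a number is *automorphic*,
# i.e. whether its square ends in the number itself:
#
#   5  -> 25    (ends in 5)   -> True
#   76 -> 5776  (ends in 76)  -> True
#   7  -> 49                  -> False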
| 494 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def a ( __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict ) -> Union[str, Any]:
for attribute in key.split(""".""" ):
__magic_name__: List[str] = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
__magic_name__: Dict = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
__magic_name__: List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
__magic_name__: Optional[Any] = value
elif weight_type == "weight_g":
__magic_name__: Optional[int] = value
elif weight_type == "weight_v":
__magic_name__: Union[str, Any] = value
elif weight_type == "bias":
__magic_name__: List[Any] = value
else:
__magic_name__: Optional[Any] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def a ( __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple ) -> Union[str, Any]:
__magic_name__: List[Any] = []
__magic_name__: Union[str, Any] = fairseq_model.state_dict()
__magic_name__: Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__magic_name__: List[str] = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
__magic_name__: List[str] = True
else:
for key, mapped_key in MAPPING.items():
__magic_name__: Optional[Any] = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__magic_name__: Dict = True
if "*" in mapped_key:
__magic_name__: List[Any] = name.split(__UpperCAmelCase )[0].split(""".""" )[-2]
__magic_name__: Any = mapped_key.replace("""*""" , __UpperCAmelCase )
if "weight_g" in name:
__magic_name__: Any = """weight_g"""
elif "weight_v" in name:
__magic_name__: Any = """weight_v"""
elif "weight" in name:
__magic_name__: Optional[int] = """weight"""
elif "bias" in name:
__magic_name__: int = """bias"""
else:
__magic_name__: Optional[Any] = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def a ( __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : Any ) -> str:
__magic_name__: str = full_name.split("""conv_layers.""" )[-1]
__magic_name__: Tuple = name.split(""".""" )
__magic_name__: Dict = int(items[0] )
__magic_name__: List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
__magic_name__: Dict = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
__magic_name__: List[str] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
__magic_name__: List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
__magic_name__: Dict = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
def convert_config(model , is_finetuned ):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = """gelu"""
    config.feat_extract_norm = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1E-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = """Wav2Vec2FeatureExtractor"""
    config.tokenizer_class = """Wav2Vec2CTCTokenizer"""
    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == """layer""" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , """vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=False , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model , hf_model , is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
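# Usage sketch (paths and file name are hypothetical, not part of this script):
#   python convert_sew_checkpoint.py \
#       --checkpoint_path /path/to/sew_checkpoint.pt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --dict_path /path/to/dict \
#       --is_finetuned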
| 213 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components( self ):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def test_save_load_optional_components( self ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def test_save_load_floataa( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def test_save_load_local( self ):
        self._test_save_load_local()

    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
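# Minimal sketch of how the mixin harness consumes the hooks above (assumed
# flow, not part of this test file):
#   components = self.get_dummy_components()
#   pipe = self.pipeline_class(**components).to(torch_device)
#   output = pipe(**self.get_dummy_inputs(torch_device)).images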
| 213 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments ):
    label_smoothing: Optional[float] = field(
        default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
    sortish_sampler: bool = field(default=False , metadata={'''help''': '''Whether to SortishSamler or not.'''} )
    predict_with_generate: bool = field(
        default=False , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
    adafactor: bool = field(default=False , metadata={'''help''': '''whether to use adafactor'''} )
    encoder_layerdrop: Optional[float] = field(
        default=None , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
    decoder_layerdrop: Optional[float] = field(
        default=None , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
    dropout: Optional[float] = field(default=None , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
    attention_dropout: Optional[float] = field(
        default=None , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
    lr_scheduler: Optional[str] = field(
        default='''linear''' , metadata={'''help''': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
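# Instantiation sketch (values are illustrative, not from this file):
#   args = Seq2SeqTrainingArguments(
#       output_dir="./out",        # inherited from TrainingArguments
#       label_smoothing=0.1,
#       predict_with_generate=True,
#       lr_scheduler="cosine",     # must be a key of arg_to_scheduler
#   )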
| 556 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig ):
    model_type = '''timm_backbone'''

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs, ):
        super().__init__(**kwargs )
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
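# Instantiation sketch (the backbone name is illustrative):
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#   assert config.features_only and config.use_timm_backbone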
| 556 | 1 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model ):
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module ):
    '''simple docstring'''

    def __init__( self , module , rank ):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
        small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )

    def forward( self , input , *args , **kwargs ):
        return self.module(input , *args , **kwargs ) + self.adapter(input )
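# Usage sketch (standalone, not part of the test flow): wrap a frozen linear
# layer so that only the low-rank adapter receives gradients:
#   base = nn.Linear(768, 768)
#   base.weight.requires_grad = False
#   lora = LoRALayer(base, rank=16)
#   out = lora(torch.randn(1, 768))  # base output + adapter output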
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase ):
    '''simple docstring'''

    model_name = '''bigscience/bloom-1b7'''

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109_659_552_692_574
    input_text = '''Hello my name is'''
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
    MAX_NEW_TOKENS = 10

    def setUp( self ):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name )
class Bnb4BitTest(Base4bitTest ):
    '''simple docstring'''

    def setUp( self ):
        super().setUp()
        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map='auto' )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )

    def tearDown( self ):
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()
    def test_quantization_config_json_serialization( self ):
        config = self.model_abit.config
        self.assertTrue(hasattr(config , 'quantization_config' ) )
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()
    def test_memory_footprint( self ):
        from bitsandbytes.nn import Paramsabit

        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        linear = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )
    def test_linear_are_abit( self ):
        from transformers import TaPreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(module , torch.nn.Linear ):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta )
    def test_generate_quality( self ):
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' )
        output_sequences = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
    def test_generate_quality_config( self ):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_abit = True
        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=quantization_config , device_map='auto' )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' )
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
    def test_raise_on_save_pretrained( self ):
        # saving a 4-bit model is not supported; NotImplementedError is an
        # assumption based on the upstream test suite
        with self.assertRaises(NotImplementedError ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname )
    def test_raise_if_config_and_load_in_abit( self ):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError ):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=quantization_config , load_in_abit=True , device_map='auto' , bnb_abit_quant_type='nf4' , )
    def test_device_and_dtype_assignment( self ):
        with self.assertRaises(ValueError ):
            # Tries with `str`
            self.model_abit.to('cpu' )
        with self.assertRaises(ValueError ):
            # Tries with a `dtype`
            self.model_abit.to(torch.floataa )
        with self.assertRaises(ValueError ):
            # Tries with a `device`
            self.model_abit.to(torch.device('cuda:0' ) )
        with self.assertRaises(ValueError ):
            # Tries casting the whole model
            self.model_abit.float()
        with self.assertRaises(ValueError ):
            # Tries casting the whole model
            self.model_abit.half()
        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' )
        self.model_fpaa = self.model_fpaa.to(torch.floataa )
        _ = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        # Check this does not throw an error
        _ = self.model_fpaa.to('cpu' )
        # Check this does not throw an error
        _ = self.model_fpaa.half()
        # Check this does not throw an error
        _ = self.model_fpaa.float()
    def test_fpaa_int_conversion( self ):
        model = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=True , device_map='auto' )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase ):
    '''simple docstring'''

    @classmethod
    def setUpClass( cls ):
        cls.model_name = 't5-small'
        cls.dense_act_model_name = 'google/flan-t5-small'  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name )
        cls.input_text = 'Translate in German: Hello, my dog is cute'
    def tearDown( self ):
        gc.collect()
        torch.cuda.empty_cache()
    def test_inference_without_keep_in_fpaa( self ):
        from transformers import TaForConditionalGeneration

        modules = TaForConditionalGeneration._keep_in_fpaa_modules
        TaForConditionalGeneration._keep_in_fpaa_modules = None
        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        _ = model.generate(**encoded_input )
        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=True , device_map='auto' )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        _ = model.generate(**encoded_input )
        TaForConditionalGeneration._keep_in_fpaa_modules = modules
    def test_inference_with_keep_in_fpaa( self ):
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        _ = model.generate(**encoded_input )
        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=True , device_map='auto' )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        _ = model.generate(**encoded_input )
class Classes4BitModelTest(Base4bitTest ):
    '''simple docstring'''

    def setUp( self ):
        super().setUp()
        # model_name
        self.model_name = 'bigscience/bloom-560m'
        self.seq_to_seq_name = 't5-small'
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=True , device_map='auto' )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=True , device_map='auto' )

    def tearDown( self ):
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class( self ):
        from bitsandbytes.nn import Paramsabit

        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest(Base4bitTest ):
    '''simple docstring'''

    def setUp( self ):
        super().setUp()

    def tearDown( self ):
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline( self ):
        self.pipe = pipeline(
            'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest ):
    '''simple docstring'''

    def setUp( self ):
        super().setUp()

    def test_multi_gpu_loading( self ):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=True , device_map='balanced' )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' )
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
class Bnb4BitTestTraining(Base4bitTest ):
    '''simple docstring'''

    def setUp( self ):
        self.model_name = 'facebook/opt-350m'
        super().setUp()

    def test_training( self ):
        if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
            return
        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module ) ):
                module.q_proj = LoRALayer(module.q_proj , rank=16 )
                module.k_proj = LoRALayer(module.k_proj , rank=16 )
                module.v_proj = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        batch = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module , LoRALayer ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(module , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class Bnb4BitGPT2Test(Bnb4BitTest ):
    '''simple docstring'''

    model_name = '''gpt2-xl'''
    EXPECTED_RELATIVE_DIFFERENCE = 3.3_191_854_854_152_187
| 703 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
A_ = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/pegasus-xsum": 5_12,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__( self , vocab_file=None , tokenizer_file=None , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=1_03 , **kwargs , ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    F"""additional_special_tokens should be of type {type(list )}, but is"""
                    F""" {type(additional_special_tokens )}""" )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                F"""<unk_{i}>""" for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask( self , seq ):
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
                F""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
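# Behaviour sketch (token ids are illustrative; assumes 5 and 6 are ordinary,
# non-special tokens of a constructed tokenizer `tok`):
#   tok.build_inputs_with_special_tokens([5, 6])  # -> [5, 6, eos_token_id]
#   tok.get_special_tokens_mask([5, 6])           # -> [0, 0, 1]  (only the appended EOS is special)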
| 123 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase ):
    """simple docstring"""

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict( self ):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""

    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp( self ):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing ,'''do_resize''' ) )
        self.assertTrue(hasattr(image_processing ,'''size''' ) )
        self.assertTrue(hasattr(image_processing ,'''apply_ocr''' ) )

    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
        self.assertEqual(image_processor.size ,{'''height''': 42, '''width''': 42} )

    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image ,Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] ,return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) ,)
        self.assertIsInstance(encoding.words ,list )
        self.assertIsInstance(encoding.boxes ,list )
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) ,)
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) ,)
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) ,)
    def test_LayoutLMv3_integration_test( self ):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset

        ds = load_dataset('''hf-internal-testing/fixtures_docvqa''' ,split='''test''' )
        image = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
        encoding = image_processing(image ,return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCAmelCase_ : List[str] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
UpperCAmelCase_ : Optional[int] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 
788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,_SCREAMING_SNAKE_CASE )
self.assertListEqual(encoding.boxes ,_SCREAMING_SNAKE_CASE )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image ,return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
| 30 |
from __future__ import annotations
def p_series(nth_term , power ) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series: list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(f'''1 / {pow(temp + 1 , int(power ) )}''' if series else '''1''' )
    return series
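# Example (matches the loop above): the first term is rendered as "1", later
# terms as "1 / n^p" with the power already evaluated:
#   p_series(5, 2)  ->  ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']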
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input('Enter the last number (nth term) of the P-Series'))
    power = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
| 303 | 0 |
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube(side_length: float ):
"""simple docstring"""
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def surface_area_cuboid(length: float , breadth: float , height: float ):
"""simple docstring"""
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float ):
"""simple docstring"""
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def surface_area_hemisphere(radius: float ):
"""simple docstring"""
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def surface_area_cone(radius: float , height: float ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float , radius_2: float , height: float ):
    """simple docstring"""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float , height: float ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float , tube_radius: float ):
"""simple docstring"""
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def area_rectangle(length: float , width: float ):
"""simple docstring"""
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def area_square(side_length: float ):
"""simple docstring"""
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def area_triangle(base: float , height: float ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def area_triangle_three_sides(side_1: float , side_2: float , side_3: float ):
    """simple docstring"""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('Given three sides do not form a triangle' )
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area
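# Worked example of Heron's formula as implemented above: for the 5-12-13
# right triangle, semi_perimeter = 15 and
#   area = sqrt(15 * (15 - 5) * (15 - 12) * (15 - 13)) = sqrt(900) = 30.0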
def area_parallelogram(base: float , height: float ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def area_trapezium(base_1: float , base_2: float , height: float ):
    """simple docstring"""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values' )
    return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius: float ):
"""simple docstring"""
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def area_ellipse(radius_x: float , radius_y: float ):
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float , diagonal_2: float ):
    """simple docstring"""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values' )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int , length: float ):
    """simple docstring"""
    if not isinstance(sides , int ) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as \
length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
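# Sanity check for the regular-polygon formula above: an equilateral triangle
# with side 10 gives (3 * 10**2) / (4 * tan(pi / 3)) ≈ 43.30, matching the
# closed form sqrt(3) / 4 * side**2.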
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f'Rectangle: {area_rectangle(1_0, 2_0) = }')
print(f'Square: {area_square(1_0) = }')
print(f'Triangle: {area_triangle(1_0, 1_0) = }')
print(f'Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }')
print(f'Parallelogram: {area_parallelogram(1_0, 2_0) = }')
print(f'Rhombus: {area_rhombus(1_0, 2_0) = }')
print(f'Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }')
print(f'Circle: {area_circle(2_0) = }')
print(f'Ellipse: {area_ellipse(1_0, 2_0) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(f'Cube: {surface_area_cube(2_0) = }')
print(f'Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }')
print(f'Sphere: {surface_area_sphere(2_0) = }')
print(f'Hemisphere: {surface_area_hemisphere(2_0) = }')
print(f'Cone: {surface_area_cone(1_0, 2_0) = }')
print(f'Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }')
print(f'Cylinder: {surface_area_cylinder(1_0, 2_0) = }')
print(f'Torus: {surface_area_torus(2_0, 1_0) = }')
print(f'Equilateral Triangle: {area_reg_polygon(3, 1_0) = }')
print(f'Square: {area_reg_polygon(4, 1_0) = }')
print(f'Reqular Pentagon: {area_reg_polygon(5, 1_0) = }')
| 491 |
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    '''simple docstring'''

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=64 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config( self ):
        return MPNetConfig.from_pretrained('microsoft/mpnet-base' )

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ):
        return MPNetConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_mpnet_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MPNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def create_and_check_mpnet_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MPNetForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_mpnet_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_mpnet_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_mpnet_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp( self ):
        self.model_tester = MPNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MPNetConfig , hidden_size=37 )
def __lowerCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*UpperCAmelCase_ )
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare a slice of the actual values against the reference values
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 491 | 1 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict) -> None:
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path) -> XGLMForCausalLM:
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
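
# Illustrative CLI usage (not part of the original file; the script filename is an
# assumption, the two positional arguments are the ones declared above):
#
#     python convert_xglm_original_ckpt_to_trfms.py /path/to/model.pt ./xglm-converted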
| 93 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path) -> None:
    # The `finetune_bert`, `large`, `share_emb` and `use_bert_emb` values below are
    # reconstructed from the defaults of the original conversion script.
    config = BertAbsConfig(
        temp_dir=".", finetune_bert=False, large=False, share_emb=True, use_bert_emb=False, encoder="bert",
        max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2,
        dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 284 | 0 |
"""simple docstring"""
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_a = [0] * len(_lowerCAmelCase )
_a = []
_a = []
_a = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_lowerCAmelCase ) ):
if indegree[i] == 0:
queue.append(_lowerCAmelCase )
while queue:
_a = queue.pop(0 )
cnt += 1
topo.append(_lowerCAmelCase )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(_lowerCAmelCase )
if cnt != len(_lowerCAmelCase ):
print('''Cycle exists''' )
else:
print(_lowerCAmelCase )
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
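
# Traced output (explanatory note, not in the original file): for the graph above,
# Kahn's algorithm dequeues vertices in the order 0, 1, 2, 3, 4, 5, so the call
# prints [0, 1, 2, 3, 4, 5].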
| 285 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
| 285 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False,
                 adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True,
                 ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
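
# Illustrative usage sketch (not part of the original file; the overrides are
# arbitrary example values, everything else falls back to the defaults above):
#
#     config = XmodConfig(num_hidden_layers=6, languages=("en_XX", "de_DE"))
#     assert config.languages == ["en_XX", "de_DE"]  # __init__ stores languages as a list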
| 29 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/rembert": 256,
}
_SCREAMING_SNAKE_CASE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True,
                 keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>",
                 sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
                         remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token,
                         eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
                         cls_token=cls_token, mask_token=mask_token, **kwargs)

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
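
    # Illustrative usage sketch (not part of the original file; "google/rembert" is
    # the published checkpoint listed in the maps above):
    #
    #     tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
    #     tokenizer("Hello world")["input_ids"]  # [CLS] + tokens + [SEP], per the methods above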
| 369 | 0 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
__lowerCAmelCase : int = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100,
                 additional_special_tokens=None, sp_model_kwargs=None, legacy=True, **kwargs):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy, **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id(self, token):
        # sentinel tokens live at the top of the vocabulary
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
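
    # Illustrative usage sketch (not part of the original file; "t5-small" is one of
    # the published checkpoints listed in the maps above):
    #
    #     tokenizer = T5Tokenizer.from_pretrained("t5-small")
    #     # Sentinel tokens occupy the top of the vocabulary, as _convert_token_to_id shows:
    #     # <extra_id_0> -> vocab_size - 1, <extra_id_1> -> vocab_size - 2, ...
    #     assert tokenizer.convert_tokens_to_ids("<extra_id_0>") == tokenizer.vocab_size - 1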
| 674 |
"""simple docstring"""
import os
from math import logaa
def _UpperCAmelCase ( lowerCamelCase__ = "base_exp.txt" ):
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(lowerCamelCase__ ) , lowerCamelCase__ ) ) ):
lowerCAmelCase__ , lowerCAmelCase__ = list(map(lowerCamelCase__ , line.split(""",""" ) ) )
if x * logaa(lowerCamelCase__ ) > largest:
lowerCAmelCase__ = x * logaa(lowerCamelCase__ )
lowerCAmelCase__ = i + 1
return result
if __name__ == "__main__":
print(solution())
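
# Why the logarithm comparison works (explanatory note, not in the original file):
# computing a**x directly would produce huge integers, but log10(a**x) = x * log10(a)
# preserves the ordering. Example: comparing 2**10 and 3**7,
#     10 * log10(2) ~= 3.010  <  7 * log10(3) ~= 3.340,
# which correctly ranks 3**7 = 2187 above 2**10 = 1024.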
| 674 | 1 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_base_extractors(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file,
    zip_file, zstd_file, tmp_path, text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_extractor(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file,
    zip_file, zstd_file, tmp_path, text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
| 349 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = "cpu" , _lowerCamelCase = None ) -> None:
'''simple docstring'''
_lowerCamelCase : Any = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowerCamelCase , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
_lowerCamelCase : List[str] = v.half()
if save_path is None: # overwrite src_path
_lowerCamelCase : Union[str, Any] = src_path
torch.save(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
fire.Fire(convert)
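
# Illustrative CLI usage (not part of the original file; the script filename is an
# assumption). `fire` maps `convert`'s parameters onto command-line arguments:
#
#     python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin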
| 46 | 0 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
_snake_case : Dict = True
except (ImportError, ModuleNotFoundError):
_snake_case : Union[str, Any] = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def A__ ( UpperCamelCase ):
re.sub("<n>" , "" , UpperCamelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCamelCase ) )
| 718 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt2': 1024,
'gpt2-medium': 1024,
'gpt2-large': 1024,
'gpt2-xl': 1024,
'distilgpt2': 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>",
                 bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token,
                         bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
def lowerCamelCase ( self :Dict , __UpperCamelCase :"Conversation" ):
A = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) + [self.eos_token_id] )
if len(__UpperCamelCase ) > self.model_max_length:
A = input_ids[-self.model_max_length :]
return input_ids
| 524 | 0 |
"""simple docstring"""
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum possible sum amongst all non-empty subsequences."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # either keep the best so far, extend it with num, or start over at num
        ans = max(num, ans + num, ans)

    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
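
# Worked example (illustrative, not part of the original file): for [1, -2, 3, 4]
# the best subsequence is {1, 3, 4}, so max_subsequence_sum([1, -2, 3, 4]) == 8.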
| 227 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith("<PIL.Image.Image image mode=RGB size=100x100 at")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 207 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
def __call__( self : List[str] , __UpperCamelCase : Optional[ImageInput] = None , __UpperCamelCase : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[bool, str, PaddingStrategy] = False , __UpperCamelCase : Union[bool, str, TruncationStrategy] = False , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : int = 0 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[str, TensorType]] = None , **__UpperCamelCase : Optional[int] , )->Optional[int]:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_UpperCAmelCase = self.tokenizer(
text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
if images is not None:
_UpperCAmelCase = self.image_processor(
__UpperCamelCase , return_image_mask=__UpperCamelCase , return_codebook_pixels=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
if text is not None and images is not None:
encoding.update(__UpperCamelCase )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCamelCase ) , tensor_type=__UpperCamelCase )
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
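
    # Illustrative usage sketch (not part of the original file; the checkpoint name
    # "facebook/flava-full" is an assumption):
    #
    #     processor = FlavaProcessor.from_pretrained("facebook/flava-full")
    #     inputs = processor(images=image, text="a photo", return_tensors="pt")
    #     # -> tokenizer outputs (input_ids, attention_mask, ...) merged with the
    #     #    image processor's pixel_values via encoding.update(image_features)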
| 95 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """Monte Carlo estimate of pi: sample random points in the unit square and
    count the fraction that lands inside the unit circle (which approaches pi / 4)."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float],
                               min_value: float = 0.0, max_value: float = 1.0) -> float:
    """Monte Carlo integration: average the function at uniform random points and
    scale by the interval width."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0):
    """Check the estimator on f(x) = x, whose exact integral is (max^2 - min^2) / 2."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int):
    """The area under y = sqrt(4 - x^2) between 0 and 2 is a quarter circle of radius 2, i.e. pi."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
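
# Illustrative subclass sketch (not part of the original file; the command name and
# behaviour are invented for the example). In practice `parser` is the sub-parsers
# action of the root ArgumentParser:
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser):
#             hello_parser = parser.add_parser("hello")
#             hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello")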
| 564 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_979_245)
print('''Example of four vector: ''')
print(f"""ct' = {four_vector[0]}""")
print(f"""x' = {four_vector[1]}""")
print(f"""y' = {four_vector[2]}""")
print(f"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f"""\n{numerical_vector}""")
| 360 | 0 |
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return the generated key."""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
print(f'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
| 709 |
'''simple docstring'''
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
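# Usage sketch (illustrative, not part of the original file): preprocessing one
# RGB image, assuming Pillow is installed and a local "sample.jpg" exists --
# both are assumptions made for the example.
#
#     from PIL import Image
#
#     image_processor = ConvNextImageProcessor()
#     image = Image.open("sample.jpg").convert("RGB")
#     batch = image_processor.preprocess(image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 384, 384) with the default size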
| 61 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524_288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
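# Usage sketch (illustrative, not part of the original file): tokenize and
# detokenize a string, assuming the "google/reformer-crime-and-punishment"
# checkpoint is reachable from the Hub.
#
#     tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     tokens = tokenizer.tokenize("Crime and Punishment")
#     print(tokens)
#     print(tokenizer.convert_tokens_to_string(tokens))  # "Crime and Punishment"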
| 481 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 481 | 1 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
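# Usage sketch (illustrative, not part of the original file): in practice this
# formatter is selected through the `datasets` API rather than instantiated
# directly. The dataset name below is only an example.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("rotten_tomatoes", split="train")
#     ds = ds.with_format("jax")
#     print(type(ds[0]))  # rows come back as dicts of jax.Array values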
| 703 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 505 | 0 |
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)

    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")

    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 39 |
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
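# Usage sketch (illustrative, not part of the original file): a small config for
# monthly data with a 24-step horizon; the values below are example choices, not
# defaults taken from the Informer paper.
#
#     config = InformerConfig(
#         prediction_length=24,
#         context_length=48,
#         num_time_features=1,
#         lags_sequence=[1, 2, 3, 4, 5, 6, 7],
#     )
#     print(config.feature_size)  # input_size * len(lags_sequence) + extra features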
| 39 | 1 |
"""simple docstring"""
import argparse

import torch
from datasets import load_dataset
from donut import DonutModel

from transformers import (
    DonutImageProcessor,
    DonutProcessor,
    DonutSwinConfig,
    DonutSwinModel,
    MBartConfig,
    MBartForCausalLM,
    VisionEncoderDecoderModel,
    XLMRobertaTokenizerFast,
)


def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 645 |
"""simple docstring"""
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 645 | 1 |
"""simple docstring"""
import enum
import shutil
import sys

TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
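# Minimal demo (illustrative, not part of the original module): draws separator
# lines around a colored message; 32 is the ANSI code for green.
if __name__ == "__main__":
    linebreak()
    forceWrite("\n")
    writeColor("hello from the menu helpers", 32, end="\n")
    linebreak()
    forceWrite("\n")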
| 103 |
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
| 385 | 0 |
"""simple docstring"""
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the point of reflection
    normal_gradient = point_y / 4 / point_x
    s_2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c_2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s_2 - c_2 * incoming_gradient) / (c_2 + s_2 * incoming_gradient)

    # to find the next point, solve the simultaeneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
| 74 |
"""simple docstring"""
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 74 | 1 |
import argparse
import shlex

import runhouse as rh


if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
| 79 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
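# Usage sketch (illustrative, not part of the original file): aligning the
# template with a concrete feature set; the two class names are example values.
#
#     features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = ImageClassification().align_with_features(features)
#     print(task.column_mapping)  # {"image": "image", "labels": "labels"}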
| 349 | 0 |
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 712 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 520 | 0 |