| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 299 |
from ..utils import DummyObject, requires_backends
# NOTE: the concrete class name was destroyed by the identifier mangling in
# this sample; every dummy object in diffusers follows this exact template.
class DummyTorchAndScipyObject(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
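# The point of the DummyObject metaclass: when a listed backend is missing,
# merely touching the stub raises an ImportError that names the packages to
# install, instead of an opaque failure deep inside diffusers. For example:
try:
    DummyTorchAndScipyObject()
except ImportError as err:
    print(err)  # lists the backends ("torch", "scipy") that must be installed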
| 299 | 1 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """Plays FizzBuzz from `number` up to and including `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
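# A quick usage check: starting at 1 and playing through 15 reproduces the
# classic sequence (note the trailing space appended after every entry).
assert fizz_buzz(1, 15) == "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "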
| 129 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1_024, encoder_layers=6, encoder_ffn_dim=1_024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1_024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
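# Usage example: the attribute_map lets the generic Transformer names resolve
# to the DETR-specific attributes defined above.
config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.encoder_attention_heads == 8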
| 129 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case_ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
A_ = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase_ )
torch.manual_seed(0 )
A_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ = CLIPTextModel(UpperCAmelCase_ )
A_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> List[str]:
'''simple docstring'''
A_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
A_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ = Image.fromarray(np.uint8(UpperCAmelCase_ ) ).convert("""RGB""" )
if str(UpperCAmelCase_ ).startswith("""mps""" ):
A_ = torch.manual_seed(UpperCAmelCase_ )
else:
A_ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
A_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ = self.get_dummy_components()
A_ = StableDiffusionInstructPix2PixPipeline(**UpperCAmelCase_ )
A_ = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
A_ = self.get_dummy_inputs(UpperCAmelCase_ )
A_ = sd_pipe(**UpperCAmelCase_ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A_ = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ = self.get_dummy_components()
A_ = StableDiffusionInstructPix2PixPipeline(**UpperCAmelCase_ )
A_ = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
A_ = self.get_dummy_inputs(UpperCAmelCase_ )
A_ = """french fries"""
A_ = sd_pipe(**UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A_ = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ = self.get_dummy_components()
A_ = StableDiffusionInstructPix2PixPipeline(**UpperCAmelCase_ )
A_ = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
A_ = self.get_dummy_inputs(UpperCAmelCase_ )
A_ = [inputs["""prompt"""]] * 2
A_ = np.array(inputs["""image"""] ).astype(np.float32 ) / 255.0
A_ = torch.from_numpy(UpperCAmelCase_ ).unsqueeze(0 ).to(UpperCAmelCase_ )
A_ = image / 2 + 0.5
A_ = image.permute(0 , 3 , 1 , 2 )
A_ = image.repeat(2 , 1 , 1 , 1 )
A_ = sd_pipe(**UpperCAmelCase_ ).images
A_ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
A_ = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ = self.get_dummy_components()
A_ = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
A_ = StableDiffusionInstructPix2PixPipeline(**UpperCAmelCase_ )
A_ = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
A_ = self.get_dummy_inputs(UpperCAmelCase_ )
A_ = sd_pipe(**UpperCAmelCase_ ).images
A_ = image[0, -3:, -3:, -1]
A_ = [round(UpperCAmelCase_ , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(UpperCAmelCase_ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
A_ = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case_ ( self ) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.get_dummy_components()
A_ = StableDiffusionInstructPix2PixPipeline(**UpperCAmelCase_ )
A_ = VaeImageProcessor(do_resize=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ )
A_ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
A_ = pipe(**self.get_dummy_inputs_by_type(UpperCAmelCase_ , input_image_type="""pt""" ) )[0]
A_ = components["""vae"""]
A_ = self.get_dummy_inputs_by_type(UpperCAmelCase_ , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
A_ = vae.encode(inputs[image_param] ).latent_dist.mode()
A_ = pipe(**UpperCAmelCase_ )[0]
A_ = np.abs(out - out_latents_inputs ).max()
self.assertLess(UpperCAmelCase_ , 1e-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self , UpperCamelCase__=0 ) -> List[Any]:
'''simple docstring'''
A_ = torch.manual_seed(UpperCAmelCase_ )
A_ = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
A_ = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCAmelCase_ )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
A_ = self.get_inputs()
A_ = pipe(**UpperCAmelCase_ ).images
A_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
A_ = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCAmelCase_ )
A_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
A_ = self.get_inputs()
A_ = pipe(**UpperCAmelCase_ ).images
A_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
A_ = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCAmelCase_ )
A_ = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
A_ = self.get_inputs()
A_ = pipe(**UpperCAmelCase_ ).images
A_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
A_ = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = 0
def callback_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None:
A_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
A_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
A_ = latents[0, -3:, -3:, -1]
A_ = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
A_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
A_ = latents[0, -3:, -3:, -1]
A_ = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
A_ = False
A_ = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCAmelCase_ , torch_dtype=torch.float16 )
A_ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
A_ = self.get_inputs()
pipe(**UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A_ = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCAmelCase_ , torch_dtype=torch.float16 )
A_ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A_ = self.get_inputs()
A_ = pipe(**UpperCAmelCase_ )
A_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
A_ = inputs["""image"""].resize((504, 504) )
A_ = """timbrooks/instruct-pix2pix"""
A_ = StableDiffusionInstructPix2PixPipeline.from_pretrained(
UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
A_ = pipe(**UpperCAmelCase_ )
A_ = output.images[0]
A_ = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
A_ = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
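# The assertion idiom used throughout these tests, in miniature: compare a
# tiny corner slice of the output against hard-coded reference values, which
# catches numerical regressions without storing whole reference images.
# Synthetic stand-ins:
demo_image = np.full((1, 32, 32, 3), 0.5, dtype=np.float32)
demo_expected = np.full(9, 0.5, dtype=np.float32)
demo_slice = demo_image[0, -3:, -3:, -1]  # 3x3 corner of the last channel
assert np.abs(demo_slice.flatten() - demo_expected).max() < 1e-3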
| 162 |
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    # phi[i] ends up holding Euler's totient of i; initialising to i - 1 is
    # exact for primes and an overestimate that the sieve corrects otherwise.
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: correct all multiples of i below
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
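# Sanity check: phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so solution(8) should count
# the 21 reduced proper fractions with denominator at most 8.
assert solution(8) == 21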
| 347 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 30 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
"""simple docstring"""
rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # Standard COCO verification image (two cats on a couch).
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
"""simple docstring"""
_UpperCAmelCase = BitConfig(
global_padding="""same""" ,layer_type="""bottleneck""" ,depths=(3, 4, 9) ,out_features=["""stage3"""] ,embedding_dynamic_padding=lowercase ,)
_UpperCAmelCase = ViTHybridConfig(backbone_config=lowercase ,image_size=3_84 ,num_labels=10_00 )
_UpperCAmelCase = False
# load original model from timm
_UpperCAmelCase = timm.create_model(lowercase ,pretrained=lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase )
_UpperCAmelCase = create_rename_keys(lowercase ,lowercase )
for src, dest in rename_keys:
rename_key(lowercase ,lowercase ,lowercase )
read_in_q_k_v(lowercase ,lowercase ,lowercase )
_UpperCAmelCase = """huggingface/label-files"""
_UpperCAmelCase = """imagenet-1k-id2label.json"""
_UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
_UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_UpperCAmelCase = ViTHybridModel(lowercase ).eval()
else:
_UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval()
model.load_state_dict(lowercase )
# create image processor
_UpperCAmelCase = create_transform(**resolve_data_config({} ,model=lowercase ) )
_UpperCAmelCase = transform.transforms
_UpperCAmelCase = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
_UpperCAmelCase = ViTHybridImageProcessor(
do_resize=lowercase ,size={"""shortest_edge""": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=lowercase ,crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} ,do_normalize=lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = transform(lowercase ).unsqueeze(0 )
_UpperCAmelCase = processor(lowercase ,return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(lowercase ,lowercase )
# verify logits
with torch.no_grad():
_UpperCAmelCase = model(lowercase )
_UpperCAmelCase = outputs.logits
print("""Predicted class:""" ,logits.argmax(-1 ).item() )
if base_model:
_UpperCAmelCase = timm_model.forward_features(lowercase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase ,outputs.pooler_output ,atol=1E-3 )
else:
_UpperCAmelCase = timm_model(lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase ,outputs.logits ,atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowercase )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
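# The conversion can also be driven programmatically, equivalent to running
# the script with its default --vit_name; a sketch (downloads the timm
# checkpoint on first use, so it is left commented out here):
# convert_vit_checkpoint("vit_base_r50_s16_384", "./vit-hybrid-base", push_to_hub=False)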
| 30 | 1 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"
def __init__( self , _lowercase=32 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.0_2 , _lowercase=1e-5 , _lowercase="gelu" , _lowercase=(512, 512, 512, 512, 512, 512, 512) , _lowercase=(5, 2, 2, 2, 2, 2, 2) , _lowercase=(10, 3, 3, 3, 3, 2, 2) , _lowercase=False , _lowercase=16 , _lowercase=19 , _lowercase=5 , _lowercase=0.0_5 , _lowercase=10 , _lowercase=2 , _lowercase=0.0 , _lowercase=10 , _lowercase=0 , _lowercase="sum" , _lowercase=False , _lowercase=False , _lowercase=256 , _lowercase=(512, 512, 512, 512, 1_500) , _lowercase=(5, 3, 3, 1, 1) , _lowercase=(1, 2, 3, 1, 1) , _lowercase=512 , _lowercase=0 , _lowercase=1 , _lowercase=2 , _lowercase=False , _lowercase=3 , _lowercase=2 , _lowercase=3 , _lowercase=None , **_lowercase , ) -> List[Any]:
super().__init__(**_lowercase , pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase )
a_ : Union[str, Any] = hidden_size
a_ : Tuple = feat_extract_activation
a_ : Union[str, Any] = list(_lowercase )
a_ : Tuple = list(_lowercase )
a_ : Optional[Any] = list(_lowercase )
a_ : int = conv_bias
a_ : int = num_conv_pos_embeddings
a_ : Optional[Any] = num_conv_pos_embedding_groups
a_ : Tuple = conv_pos_kernel_size
a_ : List[str] = len(self.conv_dim )
a_ : List[Any] = num_hidden_layers
a_ : str = intermediate_size
a_ : Union[str, Any] = hidden_act
a_ : int = num_attention_heads
a_ : str = hidden_dropout
a_ : Any = attention_dropout
a_ : Tuple = activation_dropout
a_ : Tuple = feat_proj_dropout
a_ : Optional[Any] = final_dropout
a_ : Tuple = layerdrop
a_ : Any = layer_norm_eps
a_ : int = initializer_range
a_ : Dict = vocab_size
a_ : Dict = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a_ : Any = mask_time_prob
a_ : Tuple = mask_time_length
a_ : Optional[int] = mask_time_min_masks
a_ : List[str] = mask_feature_prob
a_ : Optional[Any] = mask_feature_length
a_ : Union[str, Any] = mask_feature_min_masks
# ctc loss
a_ : Dict = ctc_loss_reduction
a_ : List[str] = ctc_zero_infinity
# adapter
a_ : int = add_adapter
a_ : Optional[Any] = adapter_kernel_size
a_ : int = adapter_stride
a_ : Dict = num_adapter_layers
a_ : int = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a_ : int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a_ : int = list(_lowercase )
a_ : Union[str, Any] = list(_lowercase )
a_ : Optional[Any] = list(_lowercase )
a_ : Tuple = xvector_output_dim
@property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
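# With the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the property above
# evaluates to 5 * 2**6 == 320: one encoder frame per 320 raw audio samples,
# i.e. 20 ms at a 16 kHz sampling rate.
assert Data2VecAudioConfig().inputs_to_logits_ratio == 320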
| 248 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
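# What the _LazyModule swap buys: importing the package is nearly free, and
# the torch/vision-heavy submodules are only imported when a name is first
# resolved. Sketch of the observable behaviour (assuming transformers is
# installed):
# import transformers.models.efficientnet as efficientnet
# config_cls = efficientnet.EfficientNetConfig  # triggers the real import here
# print(config_cls.model_type)  # "efficientnet"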
| 5 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype
    @property
    def has_state(self) -> bool:
        return True
@register_to_config
    def __init__(self, num_train_timesteps: int = 1_000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample
    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(self, state: DDPMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key: Optional[jax.random.KeyArray] = None, return_dict: bool = True) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
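# Usage sketch (public diffusers names): scheduler state is explicit and
# functional, threaded through every call rather than mutated in place.
# scheduler = FlaxDDPMScheduler(num_train_timesteps=1_000)
# state = scheduler.create_state()
# state = scheduler.set_timesteps(state, num_inference_steps=50)
# sample = jnp.zeros((1, 3, 32, 32))
# model_output = jnp.zeros_like(sample)  # stand-in for a UNet prediction
# out = scheduler.step(state, model_output, state.timesteps[0], sample, key=jax.random.PRNGKey(0))
# sample, state = out.prev_sample, out.state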
| 93 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
@property
def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self: List[Any] ) -> str:
"""simple docstring"""
return 8
@property
def lowerCamelCase_ ( self: int ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowercase__ = CLIPVisionModel(UpperCamelCase_ )
return model
@property
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase_ , do_normalize=UpperCamelCase_ , do_resize=UpperCamelCase_ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowercase__ = PriorTransformer(**UpperCamelCase_ )
return model
@property
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowercase__ = ShapERenderer(**UpperCamelCase_ )
return model
def lowerCamelCase_ ( self: str ) -> Any:
"""simple docstring"""
lowercase__ = self.dummy_prior
lowercase__ = self.dummy_image_encoder
lowercase__ = self.dummy_image_processor
lowercase__ = self.dummy_renderer
lowercase__ = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , )
lowercase__ = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[int]=0 ) -> Tuple:
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
if str(UpperCamelCase_ ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(UpperCamelCase_ )
else:
lowercase__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
lowercase__ = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = '''cpu'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**UpperCamelCase_ )
lowercase__ = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
lowercase__ = output.images[0]
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase__ = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self: int ) -> int:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self: List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ = torch_device == '''cpu'''
lowercase__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , )
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**UpperCamelCase_ )
lowercase__ = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = 1
lowercase__ = 2
lowercase__ = self.get_dummy_inputs(UpperCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
lowercase__ = batch_size * [inputs[key]]
lowercase__ = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self: str ) -> str:
"""simple docstring"""
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowercase__ = ShapEImg2ImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowercase__ = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase__ = pipe(
UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
| 93 | 1 |
def solution(n: int = 2_000_000) -> int:
    # Sieve of Eratosthenes: primality_list[i] == 0 means i is (still) prime.
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''')
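# Small-bound sanity check: the primes below 10 are 2, 3, 5 and 7, so the
# sieve should report 17.
assert solution(10) == 17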
| 187 |
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    # True iff the decimal form of n uses each of the digits 1..9 exactly once.
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num  # base_num concatenated with 2 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num  # base_num, 2 * base_num, 3 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f'''{solution() = }''')
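# Quick checks of the predicate and of the construction the search relies on:
# 9 * (1, 2, 3, 4, 5) concatenates to 918273645, a 1-to-9 pandigital number.
assert is_9_pandigital(918273645)
assert not is_9_pandigital(112345678)  # repeated digit disqualifies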
| 187 | 1 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :str , snake_case_ :str , snake_case_ :int = 8 , snake_case_ :int = 1_024 , snake_case_ :Union[str, Any]="val" , snake_case_ :Any=None , snake_case_ :Any=False , snake_case_ :List[str]="summarization" , snake_case_ :Optional[int]=None , snake_case_ :Dict=1 , snake_case_ :Dict = None , snake_case_ :List[Any]="" , **snake_case_ :int , ):
__UpperCAmelCase = str(snake_case_ )
assert local_rank is not None
torch.distributed.init_process_group(backend='''nccl''' , rank=snake_case_ )
__UpperCAmelCase = Path(snake_case_ )
__UpperCAmelCase = save_dir.joinpath(F'''rank_{local_rank}_output.json''' )
torch.cuda.set_device(snake_case_ )
__UpperCAmelCase = AutoModelForSeq2SeqLM.from_pretrained(snake_case_ ).cuda()
if fpaa:
__UpperCAmelCase = model.half()
# determine if we need to increase num_beams
use_task_specific_params(snake_case_ , snake_case_ ) # update config with task specific params
__UpperCAmelCase = generate_kwargs.pop('''num_beams''' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
__UpperCAmelCase = num_return_sequences
__UpperCAmelCase = AutoTokenizer.from_pretrained(snake_case_ )
logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
if max_source_length is None:
__UpperCAmelCase = tokenizer.model_max_length
if prefix is None:
__UpperCAmelCase = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
__UpperCAmelCase = Seq2SeqDataset(
snake_case_ , snake_case_ , snake_case_ , max_target_length=1_024 , type_path=snake_case_ , n_obs=snake_case_ , prefix=snake_case_ , **snake_case_ , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
__UpperCAmelCase = ds.make_sortish_sampler(snake_case_ , distributed=snake_case_ , add_extra_examples=snake_case_ , shuffle=snake_case_ )
__UpperCAmelCase = DataLoader(snake_case_ , sampler=snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn )
__UpperCAmelCase = []
for batch in tqdm(snake_case_ ):
__UpperCAmelCase = model.generate(
input_ids=batch['''input_ids'''].to(model.device ) , attention_mask=batch['''attention_mask'''].to(model.device ) , num_return_sequences=snake_case_ , num_beams=snake_case_ , **snake_case_ , )
__UpperCAmelCase = tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
__UpperCAmelCase = batch['''ids''']
if num_return_sequences > 1:
__UpperCAmelCase = chunks(snake_case_ , snake_case_ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(snake_case_ ):
results.append({'''pred''': pred, '''id''': ids[i].item()} )
save_json(snake_case_ , snake_case_ )
return results, sampler.num_replicas
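The regrouping step above relies on chunks yielding fixed-size slices of the flat decode output; a minimal stand-in (the helper name here is hypothetical, the real chunks comes from the utils module imported at the top):
def _chunks_sketch(seq, n):
    # one sub-list per input example, each of size num_return_sequences
    return [seq[i : i + n] for i in range(0, len(seq), n)]
assert _chunks_sketch(['a1', 'a2', 'b1', 'b2'], 2) == [['a1', 'a2'], ['b1', 'b2']]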
def run_generate():
    parser = argparse.ArgumentParser(
        epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' )
parser.add_argument('''--data_dir''' , type=snake_case_ , help='''like cnn_dm/test.source''' )
parser.add_argument(
'''--model_name''' , type=snake_case_ , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , )
parser.add_argument('''--save_dir''' , type=snake_case_ , help='''where to save''' , default='''tmp_gen''' )
parser.add_argument('''--max_source_length''' , type=snake_case_ , default=snake_case_ )
parser.add_argument(
'''--type_path''' , type=snake_case_ , default='''test''' , help='''which subset to evaluate typically train/val/test''' )
parser.add_argument('''--task''' , type=snake_case_ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=snake_case_ , default=8 , required=snake_case_ , help='''batch size''' )
parser.add_argument(
'''--local_rank''' , type=snake_case_ , default=-1 , required=snake_case_ , help='''should be passed by distributed.launch''' )
parser.add_argument(
'''--n_obs''' , type=snake_case_ , default=snake_case_ , required=snake_case_ , help='''How many observations. Defaults to all.''' )
parser.add_argument(
'''--num_return_sequences''' , type=snake_case_ , default=1 , required=snake_case_ , help='''How many sequences to return''' )
parser.add_argument(
'''--sync_timeout''' , type=snake_case_ , default=600 , required=snake_case_ , help='''How long should master process wait for other processes to finish.''' , )
parser.add_argument('''--src_lang''' , type=snake_case_ , default=snake_case_ , required=snake_case_ )
parser.add_argument('''--tgt_lang''' , type=snake_case_ , default=snake_case_ , required=snake_case_ )
parser.add_argument(
'''--prefix''' , type=snake_case_ , required=snake_case_ , default=snake_case_ , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--debug''' , action='''store_true''' )
    start_time = time.time()
    args , rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest )
if generate_kwargs and args.local_rank <= 0:
print(F'''parsed the following generate kwargs: {generate_kwargs}''' )
    json_save_dir = Path(args.save_dir + '''_tmp''' )
    Path(json_save_dir ).mkdir(exist_ok=True ) # this handles locking.
    intermediate_files = list(json_save_dir.glob('''rank_*.json''' ) )
if intermediate_files:
raise ValueError(F'''Found files at {json_save_dir} please move or remove them.''' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs['''src_lang'''] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs['''tgt_lang'''] = args.tgt_lang
    Path(args.save_dir ).mkdir(exist_ok=True )
    results , num_replicas = eval_data_dir(
        args.data_dir , json_save_dir , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fp16 , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=dataset_kwargs , **generate_kwargs , )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir )
        save_dir.mkdir(exist_ok=True )
        partial_results = gather_results_from_each_node(num_replicas , json_save_dir , args.sync_timeout )
        preds = combine_partial_results(partial_results )
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath('''pseudolabel_results.json''' )
            print(F'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' )
            save_json(preds , save_path )
            return
        tgt_file = Path(args.data_dir ).joinpath(args.type_path + '''.target''' )
        with open(tgt_file ) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds )]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = '''translation''' in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = '''bleu''' if calc_bleu else '''rouge'''
        metrics = score_fn(preds , labels )
        metrics['''n_obs'''] = len(preds )
        runtime = time.time() - start_time
        metrics['''seconds_per_sample'''] = round(runtime / metrics['''n_obs'''] , 4 )
        metrics['''n_gpus'''] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(F'''{args.type_path}_{metric_name}.json''' )
        save_json(metrics , metrics_save_path , indent=None )
        print(metrics )
        write_txt_file(preds , save_dir.joinpath(F'''{args.type_path}_generations.txt''' ) )
        if args.debug:
            write_txt_file(labels , save_dir.joinpath(F'''{args.type_path}.target''' ) )
        else:
            shutil.rmtree(json_save_dir )
def combine_partial_results( partial_results ):
    records = []
    for partial_result in partial_results:
        records.extend(partial_result )
    records = sorted(records , key=lambda x: x['''id'''] )
    preds = [x['''pred'''] for x in records]
    return preds
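A minimal sanity check of the merge above (the record shape {'pred': ..., 'id': ...} matches what eval_data_dir writes via save_json; the values are made up):
_partial = [[{'pred': 'world', 'id': 1}], [{'pred': 'hello', 'id': 0}]]
_records = [r for part in _partial for r in part]
# sorting on 'id' restores dataset order no matter which rank wrote which shard
assert [r['pred'] for r in sorted(_records, key=lambda r: r['id'])] == ['hello', 'world']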
def gather_results_from_each_node( num_replicas , save_dir , timeout ):
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info('''waiting for all nodes to finish''' )
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('''rank_*.json''' ) )
        if len(json_files ) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json , json_files )
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('''Rank 0 gave up on waiting for other processes''' )
    # Unreachable
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 86 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
    )
default_cache_path = os.path.join(torch_cache_home, 'transformers')
CLOUDFRONT_DISTRIB_PREFIX = 'https://cdn.huggingface.co'
S3_BUCKET_PREFIX = 'https://s3.amazonaws.com/models.huggingface.co/bert'
PATH = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
CONFIG = os.path.join(PATH, 'config.yaml')
ATTRIBUTES = os.path.join(PATH, 'attributes.txt')
OBJECTS = os.path.join(PATH, 'objects.txt')
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.yaml'
def load_labels( objs=OBJECTS , attrs=ATTRIBUTES ):
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split(''',''' )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(''',''' )[0].lower().strip() )
    return vg_classes, vg_attrs
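The parsing above assumes one label per line with optional comma-separated aliases, keeping only the first field; a hypothetical objects.txt line and its result:
# 'Traffic Light,traffic lights\n' -> 'traffic light'
assert 'Traffic Light,traffic lights\n'.split(',')[0].lower().strip() == 'traffic light'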
def load_checkpoint( ckp_path ):
    r = OrderedDict()
    with open(ckp_path , '''rb''' ) as f:
        ckp = pkl.load(f )['''model''']
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v , np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v , torch.Tensor ), type(v )
        r[k] = v
    return r
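Intended use, as a sketch (the checkpoint path is hypothetical):
# state_dict = load_checkpoint('frcnn_checkpoint.pkl')
# model.load_state_dict(state_dict)  # every value is a torch.Tensor after conversion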
class Config:
    _pointer = {}
    def __init__( self , dictionary: dict , name: str = "root" , level=0 ):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k )
            v = copy.deepcopy(v )
            if isinstance(v , dict ):
                v = Config(v , name=k , level=level + 1 )
            d[k] = v
            setattr(self , k , v )
        self._pointer = d
def __repr__( self : Any ):
return str(list((self._pointer.keys()) ) )
    def __setattr__( self , key , val ):
        self.__dict__[key] = val
        self.__dict__[key.split('''.''' )[-1]] = val
        levels = key.split('''.''' )
        last_level = len(levels ) - 1
        pointer = self._pointer
        if len(levels ) > 1:
            for i, l in enumerate(levels ):
                if hasattr(self , l ) and isinstance(getattr(self , l ) , Config ):
                    setattr(getattr(self , l ) , '''.'''.join(levels[i:] ) , val )
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict( self ):
        return self._pointer
    def dump_yaml( self , data , file_name ):
        with open(F'''{file_name}''' , '''w''' ) as stream:
            dump(data , stream )
    def dump_json( self , data , file_name ):
        with open(F'''{file_name}''' , '''w''' ) as stream:
            json.dump(data , stream )
    @staticmethod
    def load_yaml( config_name ):
        with open(config_name ) as stream:
            data = load(stream , Loader=Loader )
        return data
    def __str__( self ):
        t = '''    '''
        if self._name != "root":
            r = F'''{t * (self._level-1)}{self._name}:\n'''
        else:
            r = ''''''
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(v , Config ):
                r += F'''{t * (self._level)}{v}\n'''
                self._level += 1
            else:
                r += F'''{t * (self._level)}{k}: {v} ({type(v ).__name__})\n'''
        self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        return cls(config_dict )
    @classmethod
    def get_config_dict( cls , pretrained_model_name_or_path , **kwargs ):
        cache_dir = kwargs.pop('''cache_dir''' , None )
        force_download = kwargs.pop('''force_download''' , False )
        resume_download = kwargs.pop('''resume_download''' , False )
        proxies = kwargs.pop('''proxies''' , None )
        local_files_only = kwargs.pop('''local_files_only''' , False )
        if os.path.isdir(pretrained_model_name_or_path ):
            config_file = os.path.join(pretrained_model_name_or_path , CONFIG_NAME )
        elif os.path.isfile(pretrained_model_name_or_path ) or is_remote_url(pretrained_model_name_or_path ):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path , filename=CONFIG_NAME , use_cdn=False )
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_dict = Config.load_yaml(resolved_config_file )
        except EnvironmentError:
            msg = '''Can\'t load config for'''
            raise EnvironmentError(msg )
        if resolved_config_file == config_file:
            print('''loading configuration file from path''' )
        else:
            print('''loading configuration file cache''' )
        return Config.load_yaml(resolved_config_file ), kwargs
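A minimal illustration of the nested-Config behaviour implemented above (keys and values are made up):
_cfg = Config({'model': {'hidden_size': 768}})
assert _cfg.model.hidden_size == 768  # nested dicts become nested Config objects
assert list(_cfg.to_dict().keys()) == ['model']  # to_dict exposes the raw pointer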
def compare( in_tensor ):
    out_tensor = torch.load('''dump.pt''' , map_location=in_tensor.device )
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape , na[0, 0, :5] )
    print(nb.shape , nb[0, 0, :5] )
    assert np.allclose(na , nb , rtol=0.01 , atol=0.1 ), (
        F'''{sum([1 for x in np.isclose(na , nb , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
        " element-wise mismatch"
    )
    raise Exception('''tensors are all good''' )
# Hugging face functions below
def is_remote_url( url_or_filename ):
    parsed = urlparse(url_or_filename )
    return parsed.scheme in ("http", "https")
def hf_bucket_url( model_id , filename , use_cdn=True ):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = '''/''' not in model_id
    if legacy_format:
        return F'''{endpoint}/{model_id}-{filename}'''
    else:
        return F'''{endpoint}/{model_id}/{filename}'''
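For concreteness, the two URL shapes the helper above produces (the model ids are illustrative; the CDN endpoint is the default):
assert hf_bucket_url('facebook/bart-large', 'pytorch_model.bin') == 'https://cdn.huggingface.co/facebook/bart-large/pytorch_model.bin'
assert hf_bucket_url('bert-base-uncased', 'pytorch_model.bin') == 'https://cdn.huggingface.co/bert-base-uncased-pytorch_model.bin'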
def http_get( url , temp_file , proxies=None , resume_size=0 , user_agent=None , ):
    ua = '''python/{}'''.format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join('''{}/{}'''.format(k , v ) for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    headers = {'''user-agent''': ua}
    if resume_size > 0:
        headers['''Range'''] = '''bytes=%d-''' % (resume_size,)
    response = requests.get(url , stream=True , proxies=proxies , headers=headers )
    if response.status_code == 416: # Range not satisfiable
        return
    content_length = response.headers.get('''Content-Length''' )
    total = resume_size + int(content_length ) if content_length is not None else None
    progress = tqdm(
        unit='''B''' , unit_scale=True , total=total , initial=resume_size , desc='''Downloading''' , )
    for chunk in response.iter_content(chunk_size=1_024 ):
        if chunk: # filter out keep-alive new chunks
            progress.update(len(chunk ) )
            temp_file.write(chunk )
    progress.close()
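Resuming is plain HTTP range arithmetic: with resume_size bytes already on disk the request carries a Range header, and a 416 reply means the file was already complete.
# e.g. resume_size=1000 -> headers['Range'] == 'bytes=1000-'
# total progress = resume_size + int(Content-Length of the remaining bytes)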
def get_from_cache( url , cache_dir=None , force_download=False , proxies=None , etag_timeout=10 , resume_download=False , user_agent=None , local_files_only=False , ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    os.makedirs(cache_dir , exist_ok=True )
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url , allow_redirects=True , proxies=proxies , timeout=etag_timeout )
            if response.status_code == 200:
                etag = response.headers.get('''ETag''' )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url , etag )
    # get cache path to put the file
    cache_path = os.path.join(cache_dir , filename )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path ):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir ) , filename + '''.*''' )
                if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
            ]
            if len(matching_files ) > 0:
                return os.path.join(cache_dir , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        '''Cannot find the requested files in the cached path and outgoing traffic has been'''
                        ''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
                        ''' to False.''' )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + '''.lock'''
    with FileLock(lock_path ):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + '''.incomplete'''
            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path , '''a+b''' ) as f:
                    yield f
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path ):
                resume_size = os.stat(incomplete_path ).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile , dir=cache_dir , delete=False )
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                '''%s not found in cache or force_download set to True, downloading to %s''' % (url, temp_file.name) )
            http_get(
                url , temp_file , proxies=proxies , resume_size=resume_size , user_agent=user_agent , )
        os.replace(temp_file.name , cache_path )
        meta = {'''url''': url, '''etag''': etag}
        meta_path = cache_path + '''.json'''
        with open(meta_path , '''w''' ) as meta_file:
            json.dump(meta , meta_file )
    return cache_path
def url_to_filename( url , etag=None ):
    url_bytes = url.encode('''utf-8''' )
    url_hash = shaaaa(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode('''utf-8''' )
        etag_hash = shaaaa(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith('''.h5''' ):
        filename += ".h5"
    return filename
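The cache key above is deterministic (shaaaa plays the role of hashlib's sha256 in this file's naming):
# url_to_filename('https://example.com/model.bin', etag='abc')
#   -> hexdigest(url) + '.' + hexdigest(etag), plus an '.h5' suffix for HDF5 weights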
def cached_path( url_or_filename , cache_dir=None , force_download=False , proxies=None , resume_download=False , user_agent=None , extract_compressed_file=False , force_extract=False , local_files_only=False , ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename , Path ):
        url_or_filename = str(url_or_filename )
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    if is_remote_url(url_or_filename ):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , user_agent=user_agent , local_files_only=local_files_only , )
    elif os.path.exists(url_or_filename ):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError('''file {} not found'''.format(url_or_filename ) )
    else:
        # Something unknown
        raise ValueError('''unable to parse {} as a URL or as a local path'''.format(url_or_filename ) )
    if extract_compressed_file:
        if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir , output_file = os.path.split(output_path )
        output_extract_dir_name = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
        output_path_extracted = os.path.join(output_dir , output_extract_dir_name )
        if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + '''.lock'''
        with FileLock(lock_path ):
            shutil.rmtree(output_path_extracted , ignore_errors=True )
            os.makedirs(output_path_extracted )
            if is_zipfile(output_path ):
                with ZipFile(output_path , '''r''' ) as zip_file:
                    zip_file.extractall(output_path_extracted )
                    zip_file.close()
            elif tarfile.is_tarfile(output_path ):
                tar_file = tarfile.open(output_path )
                tar_file.extractall(output_path_extracted )
                tar_file.close()
            else:
                raise EnvironmentError('''Archive format of {} could not be identified'''.format(output_path ) )
        return output_path_extracted
    return output_path
def get_data( query , delim="," ):
    assert isinstance(query , str )
    if os.path.isfile(query ):
        with open(query ) as f:
            data = eval(f.read() )
    else:
        req = requests.get(query )
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        if isinstance(data , str ):
            # only post-process textual payloads
            try:
                data = eval(data )
            except Exception:
                data = data.split('''\n''' )
        req.close()
    return data
def get_image_from_url( url ):
    response = requests.get(url )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def load_frcnn_pkl_from_url( url ):
    fn = url.split('''/''' )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(url )
    with open(fn , '''rb''' ) as stream:
        weights = pkl.load(stream )
    model = weights.pop('''model''' )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            ka = k.replace('''running_var''' , '''num_batches_tracked''' )
            new[ka] = zero
    return new
def get_demo_path():
    print(F'''{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb''' )
def img_tensorize( im , input_format="RGB" ):
    assert isinstance(im , str )
    if os.path.isfile(im ):
        img = cva.imread(im )
    else:
        img = get_image_from_url(im )
        assert img is not None, F'''could not connect to: {im}'''
    img = cva.cvtColor(img , cva.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk( images , batch=1 ):
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
| 86 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 1_0_2_4,
'facebook/mbart-large-cc25': 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class MBartTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self ,vocab_file=None ,tokenizer_file=None ,bos_token="<s>" ,eos_token="</s>" ,sep_token="</s>" ,cls_token="<s>" ,unk_token="<unk>" ,pad_token="<pad>" ,mask_token="<mask>" ,src_lang=None ,tgt_lang=None ,additional_special_tokens=None ,**kwargs ,) -> None:
        """simple docstring"""
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        super().__init__(
            vocab_file=vocab_file ,tokenizer_file=tokenizer_file ,bos_token=bos_token ,eos_token=eos_token ,sep_token=sep_token ,cls_token=cls_token ,unk_token=unk_token ,pad_token=pad_token ,mask_token=mask_token ,src_lang=src_lang ,tgt_lang=tgt_lang ,additional_special_tokens=additional_special_tokens ,**kwargs ,)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else '''en_XX'''
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang(self ) -> str:
        """simple docstring"""
        return self._src_lang
    @src_lang.setter
    def src_lang(self ,new_src_lang ) -> None:
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens(self ,token_ids_0 ,token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self ,token_ids_0 ,token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs(self ,raw_inputs ,return_tensors ,src_lang ,tgt_lang ,**extra_kwargs ):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs ,add_special_tokens=True ,return_tensors=return_tensors ,**extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
    def prepare_seqaseq_batch(self ,src_texts ,src_lang = "en_XX" ,tgt_texts = None ,tgt_lang = "ro_RO" ,**kwargs ,) -> BatchEncoding:
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seqaseq_batch(src_texts ,tgt_texts ,**kwargs )
    def _switch_to_input_mode(self ):
        """simple docstring"""
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode(self ):
        """simple docstring"""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self ,src_lang ) -> None:
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str ,pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
    def set_tgt_lang_special_tokens(self ,lang ) -> None:
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str ,pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
    def save_vocabulary(self ,save_directory ,filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        return (out_vocab_file,)
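Typical translation usage, as a sketch (the checkpoint id and sentence are illustrative):
# tok = MBartTokenizerFast.from_pretrained('facebook/mbart-large-en-ro')
# batch = tok.prepare_seqaseq_batch(['UN Chief says there is no military solution in Syria'],
#                                   src_lang='en_XX', tgt_lang='ro_RO', return_tensors='pt')
# source input_ids end with [eos, en_XX]; the suffix switches to the target code for labels.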
| 129 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """Pix2StructImageProcessor"""
    tokenizer_class = ("""T5Tokenizer""", """T5TokenizerFast""")
    def __init__(self ,image_processor ,tokenizer ) -> None:
        """simple docstring"""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor ,tokenizer )
    def __call__(self ,images=None ,text = None ,add_special_tokens = True ,padding = False ,truncation = None ,max_length = None ,max_patches = 20_48 ,stride = 0 ,pad_to_multiple_of = None ,return_attention_mask = None ,return_overflowing_tokens = False ,return_special_tokens_mask = False ,return_offsets_mapping = False ,return_token_type_ids = False ,return_length = False ,verbose = True ,return_tensors = None ,**kwargs ,) -> BatchEncoding:
        """simple docstring"""
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text ,add_special_tokens=add_special_tokens ,padding=padding ,truncation=truncation ,max_length=max_length ,stride=stride ,pad_to_multiple_of=pad_to_multiple_of ,return_attention_mask=return_attention_mask ,return_overflowing_tokens=return_overflowing_tokens ,return_special_tokens_mask=return_special_tokens_mask ,return_offsets_mapping=return_offsets_mapping ,return_token_type_ids=return_token_type_ids ,return_length=return_length ,verbose=verbose ,return_tensors=return_tensors ,**kwargs ,)
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images ,return_tensors=return_tensors ,max_patches=max_patches ,**kwargs )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images ,return_tensors=return_tensors ,max_patches=max_patches ,header_text=text ,**kwargs )
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text ,add_special_tokens=add_special_tokens ,padding=padding ,truncation=truncation ,max_length=max_length ,stride=stride ,pad_to_multiple_of=pad_to_multiple_of ,return_attention_mask=return_attention_mask ,return_overflowing_tokens=return_overflowing_tokens ,return_special_tokens_mask=return_special_tokens_mask ,return_offsets_mapping=return_offsets_mapping ,return_token_type_ids=return_token_type_ids ,return_length=return_length ,verbose=verbose ,return_tensors=return_tensors ,**kwargs ,)
            if "attention_mask" in text_encoding:
                text_encoding['''decoder_attention_mask'''] = text_encoding.pop('''attention_mask''' )
            if "input_ids" in text_encoding:
                text_encoding['''decoder_input_ids'''] = text_encoding.pop('''input_ids''' )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode(self ,*args ,**kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args ,**kwargs )
    def decode(self ,*args ,**kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args ,**kwargs )
    @property
    def model_input_names(self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
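Typical combined usage, as a sketch (the checkpoint id and inputs are illustrative):
# processor = Pix2StructProcessor.from_pretrained('google/pix2struct-textcaps-base')
# inputs = processor(images=image, text='A caption', return_tensors='pt', max_patches=2048)
# -> flattened image patches plus decoder_input_ids / decoder_attention_mask for the text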
| 129 | 1 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp( self ):
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Any:
lowerCAmelCase__ = '''UNwant\u00E9d,running'''
lowerCAmelCase__ = '''unwanted, running'''
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(lowerCamelCase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [9, 6, 7, 12, 10, 11] )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = '''UNwant\u00E9d,running'''
lowerCAmelCase__ = tokenizer.tokenize(lowerCamelCase_ )
lowerCAmelCase__ = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ )
lowerCAmelCase__ = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
# With lower casing
lowerCAmelCase__ = self.get_tokenizer(do_lower_case=lowerCamelCase_ )
lowerCAmelCase__ = self.get_rust_tokenizer(do_lower_case=lowerCamelCase_ )
lowerCAmelCase__ = '''UNwant\u00E9d,running'''
lowerCAmelCase__ = tokenizer.tokenize(lowerCamelCase_ )
lowerCAmelCase__ = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ )
lowerCAmelCase__ = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = BasicTokenizer(do_lower_case=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = BasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = BasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = BasicTokenizer(do_lower_case=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = BasicTokenizer(do_lower_case=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = BasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = BasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = BasicTokenizer(do_lower_case=lowerCamelCase_ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = BasicTokenizer()
lowerCAmelCase__ = '''a\n\'ll !!to?\'d of, can\'t.'''
lowerCAmelCase__ = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(lowerCamelCase_ ) , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCAmelCase__ = {}
for i, token in enumerate(lowerCamelCase_ ):
lowerCAmelCase__ = i
lowerCAmelCase__ = WordpieceTokenizer(vocab=lowerCamelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCamelCase_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCamelCase_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
lowerCAmelCase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def __SCREAMING_SNAKE_CASE ( self ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
lowerCAmelCase__ = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
lowerCAmelCase__ = tokenizer_r.encode_plus(
lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , )
lowerCAmelCase__ = tokenizer_r.do_lower_case if hasattr(lowerCamelCase_ , '''do_lower_case''' ) else False
lowerCAmelCase__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = ['''的''', '''人''', '''有''']
lowerCAmelCase__ = ''''''.join(lowerCamelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ = True
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_r.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_r.convert_ids_to_tokens(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.convert_ids_to_tokens(lowerCamelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = False
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_r.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_r.convert_ids_to_tokens(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.convert_ids_to_tokens(lowerCamelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase__ = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCamelCase_ )
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
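The wordpiece expectations above all follow greedy longest-match-first over the tiny vocab:
# 'unwanted'  -> 'un' + '##want' + '##ed'  (longest known prefix first, remainder marked '##')
# 'unwantedX' -> '[UNK]'                   (one missing piece makes the whole word unknown)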
| 360 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class TextClassificationPipelineTests( unittest.TestCase ):
    '''simple docstring'''
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' )
lowerCAmelCase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
lowerCAmelCase__ = text_classifier('''This is great !''' , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}] )
lowerCAmelCase__ = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] , )
lowerCAmelCase__ = text_classifier('''This is great !''' , top_k=1 )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
# Legacy behavior
lowerCAmelCase__ = text_classifier('''This is great !''' , return_all_scores=lowerCamelCase_ )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
lowerCAmelCase__ = text_classifier('''This is great !''' , return_all_scores=lowerCamelCase_ )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}]] )
lowerCAmelCase__ = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=lowerCamelCase_ )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] , )
lowerCAmelCase__ = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=lowerCamelCase_ )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [
{'''label''': '''LABEL_0''', '''score''': 0.504},
{'''label''': '''LABEL_0''', '''score''': 0.504},
] , )
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> int:
import torch
lowerCAmelCase__ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , )
lowerCAmelCase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@require_tf
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' )
lowerCAmelCase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@slow
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = pipeline('''text-classification''' )
lowerCAmelCase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
lowerCAmelCase__ = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
lowerCAmelCase__ = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
@slow
@require_tf
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = pipeline('''text-classification''' , framework='''tf''' )
lowerCAmelCase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
lowerCAmelCase__ = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
lowerCAmelCase__ = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
lowerCAmelCase__ = TextClassificationPipeline(model=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
lowerCAmelCase__ = '''HuggingFace is in'''
lowerCAmelCase__ = text_classifier(lowerCamelCase_ )
self.assertEqual(nested_simplify(lowerCamelCase_ ) , [{'''label''': ANY(lowerCamelCase_ ), '''score''': ANY(lowerCamelCase_ )}] )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
lowerCAmelCase__ = ['''HuggingFace is in ''', '''Paris is in France''']
lowerCAmelCase__ = text_classifier(lowerCamelCase_ )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [{'''label''': ANY(lowerCamelCase_ ), '''score''': ANY(lowerCamelCase_ )}, {'''label''': ANY(lowerCamelCase_ ), '''score''': ANY(lowerCamelCase_ )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
lowerCAmelCase__ = text_classifier(lowerCamelCase_ , top_k=lowerCamelCase_ )
lowerCAmelCase__ = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [[{'''label''': ANY(lowerCamelCase_ ), '''score''': ANY(lowerCamelCase_ )}] * N, [{'''label''': ANY(lowerCamelCase_ ), '''score''': ANY(lowerCamelCase_ )}] * N] , )
lowerCAmelCase__ = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
lowerCAmelCase__ = text_classifier(lowerCamelCase_ )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , {'''label''': ANY(lowerCamelCase_ ), '''score''': ANY(lowerCamelCase_ )} , )
self.assertTrue(outputs['''label'''] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
lowerCAmelCase__ = [['''HuggingFace is in ''', '''Paris is in France''']]
with self.assertRaises(lowerCamelCase_ ):
text_classifier(lowerCamelCase_ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
lowerCAmelCase__ = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [{'''label''': ANY(lowerCamelCase_ ), '''score''': ANY(lowerCamelCase_ )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
| 228 | 0 |
def combination_sum_iv(n: int , array: list[int] , target: int ) -> int:
    '''simple docstring'''
    def count_of_possible_combinations(target: int ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array(n: int , array: list[int] , target: int ) -> int:
    '''simple docstring'''
    def count_of_possible_combinations_with_dp_array(
        target: int , dp_array: list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up(n: int , array: list[int] , target: int ) -> int:
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
__a = 3
__a = 5
__a = [1, 2, 5]
print(combination_sum_iv(n, array, target))
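# Editor's note (cross-check, not part of the original script): all three
# variants count ordered compositions, so with array=[1, 2, 5] and target=5
# they should each return 9 (e.g. 1+1+1+1+1, 1+2+2, 2+1+2, 2+2+1, 5, ...).
#
#     assert (
#         combination_sum_iv(n, array, target)
#         == combination_sum_iv_dp_array(n, array, target)
#         == combination_sum_iv_bottom_up(n, array, target)
#         == 9
#     )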
| 30 |
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
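# Editor's sketch (an illustration of the magic-number idea the test above
# relies on; this mirrors, but is not, the actual `datasets.utils.extract`
# implementation): instead of trusting `zipfile.is_zipfile`, which also scans
# for an end-of-central-directory record anywhere in the file, compare the
# file's first bytes against the ZIP local-file-header signature. Real ZIPs
# can also start with PK\x05\x06 (empty) or PK\x07\x08 (spanned).


def looks_like_zip(path) -> bool:
    # Return True only if the file starts with a known ZIP magic number.
    with open(path, "rb") as f:
        head = f.read(4)
    return head in (b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08")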
| 30 | 1 |
"""Download all available tweets for a given user and save them to a CSV file."""

import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter and initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
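# Editor's sketch (hedged alternative, not part of the original script): the
# manual max_id pagination above can usually be replaced with tweepy's Cursor,
# which walks backwards through the timeline for you.
#
#     def get_all_tweets_with_cursor(api: tweepy.API, screen_name: str) -> list:
#         return [
#             tweet
#             for tweet in tweepy.Cursor(
#                 api.user_timeline, screen_name=screen_name, count=200
#             ).items()
#         ]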
| 355 |
"""Time Series Transformer model configuration."""

from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    """Configuration class for a Time Series Transformer model."""

    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
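# Editor's sketch (illustrative only; the values below are made up): building
# a config shows how `feature_size` follows from the input size, the number
# of lags, and `_number_of_features`.
#
#     config = TimeSeriesTransformerConfig(
#         prediction_length=24,
#         context_length=48,
#         num_time_features=2,
#         lags_sequence=[1, 2, 3],
#     )
#     # input_size(1) * len(lags)(3) + embeddings(0) + dynamic(0) + time(2)
#     # + static_real(0) + input_size*2(2) = 7
#     assert config.feature_size == 7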
| 187 | 0 |
from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n    >>> from diffusers.utils import load_image\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior.to(\"cuda\")\n\n    >>> prompt = \"A red cartoon frog, 4k\"\n    >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n    >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n    ... )\n    >>> pipe.to(\"cuda\")\n\n    >>> init_image = load_image(\n    ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n    ...     \"/kandinsky/frog.png\"\n    ... )\n\n    >>> image = pipe(\n    ...     image=init_image,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ...     strength=0.2,\n    ... ).images\n\n    >>> image[0].save(\"red_frog.png\")\n    ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    """Return (height, width) rescaled so each equals ceil(dim / scale_factor**2) * scale_factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a [-1, 1] normalized torch tensor of shape (1, 3, h, w)."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
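# Editor's sketch (standalone illustration of the `get_timesteps` arithmetic
# above, not part of the pipeline): with num_inference_steps=100 and
# strength=0.2, only the last 20 scheduler timesteps are run, so the init
# image is only lightly noised and strongly preserved.


def effective_steps(num_inference_steps: int, strength: float) -> int:
    # mirrors: init_timestep = min(int(n * s), n); t_start = max(n - init_timestep, 0)
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return num_inference_steps - t_start


assert effective_steps(100, 0.2) == 20
assert effective_steps(100, 1.0) == 100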
| 93 |
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    """Few-shot NER model: scores each query token against support-set entity boundary tokens."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Return start/end probabilities over query tokens, one row per query example."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
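# Editor's sketch (hypothetical usage; tokenizer preparation and the helper
# names below are assumptions, not the official FSNER API): the forward pass
# expects tokenized query/support dicts, with `sizes`, `start_token_id` and
# `end_token_id` added to the support batch before the call.
#
#     model = FSNERModel()
#     W_query = tokenizer(["find [E] entities [/E] here"], return_tensors="pt")
#     W_supports = tokenizer(support_sentences, return_tensors="pt", padding=True)
#     W_supports["sizes"] = torch.tensor([len(support_sentences)])
#     W_supports["start_token_id"] = torch.tensor(start_id)
#     W_supports["end_token_id"] = torch.tensor(end_id)
#     p_starts, p_ends = model(W_query, W_supports)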
| 93 | 1 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> list[str]:
return [sentence[i : i + ngram_size] for i in range(len(__UpperCAmelCase ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
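# Editor's sketch (an assumed extension, not in the original file): the same
# sliding-window idea gives word-level n-grams if you split the sentence first.


def create_word_ngram(sentence: str, ngram_size: int) -> list[tuple[str, ...]]:
    words = sentence.split()
    return [tuple(words[i : i + ngram_size]) for i in range(len(words) - ngram_size + 1)]


# create_word_ngram("the cat sat", 2) -> [('the', 'cat'), ('cat', 'sat')]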
| 2 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :List[str] = "codegen"
_UpperCAmelCase :Optional[int] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _UpperCAmelCase=50400 , _UpperCAmelCase=2048 , _UpperCAmelCase=2048 , _UpperCAmelCase=4096 , _UpperCAmelCase=28 , _UpperCAmelCase=16 , _UpperCAmelCase=64 , _UpperCAmelCase=None , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=50256 , _UpperCAmelCase=50256 , _UpperCAmelCase=False , **_UpperCAmelCase , ):
lowercase__: int = vocab_size
lowercase__: str = n_ctx
lowercase__: List[Any] = n_positions
lowercase__: Union[str, Any] = n_embd
lowercase__: Optional[Any] = n_layer
lowercase__: str = n_head
lowercase__: List[Any] = n_inner
lowercase__: Union[str, Any] = rotary_dim
lowercase__: Optional[Any] = activation_function
lowercase__: Union[str, Any] = resid_pdrop
lowercase__: Optional[int] = embd_pdrop
lowercase__: Optional[Any] = attn_pdrop
lowercase__: Optional[int] = layer_norm_epsilon
lowercase__: List[Any] = initializer_range
lowercase__: Tuple = use_cache
lowercase__: Any = bos_token_id
lowercase__: Any = eos_token_id
super().__init__(
bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ):
super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase )
if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ):
# TODO: how to do that better?
lowercase__: Any = 0
@property
def _snake_case ( self ):
lowercase__: int = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' )
lowercase__: int = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
lowercase__: Tuple = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _snake_case ( self ):
return self._config.n_layer
@property
def _snake_case ( self ):
return self._config.n_head
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
lowercase__: Optional[int] = super(_UpperCAmelCase , self ).generate_dummy_inputs(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase__, lowercase__: Union[str, Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowercase__: Any = seqlen + 2
lowercase__: List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowercase__: Optional[Any] = [
(torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers )
]
lowercase__: Optional[Any] = common_inputs['''attention_mask''']
if self.use_past:
lowercase__: List[str] = ordered_inputs['''attention_mask'''].dtype
lowercase__: List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def _snake_case ( self ):
return 13
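# Editor's sketch (illustrative; the small sizes below are made up): with
# `use_past=True`, the exported dummy inputs carry `input_ids`, one
# (key, value) pair of zero tensors per layer, and an attention mask that
# covers past + present positions.
#
#     from transformers import AutoTokenizer
#
#     config = CodeGenConfig(n_layer=2, n_head=4, n_embd=64)
#     onnx_config = CodeGenOnnxConfig(config, use_past=True)
#     tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=1, seq_length=8)
#     # dummy["past_key_values"] holds 2 (layer) pairs of zero tensors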
| 2 | 1 |
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase (_UpperCamelCase ):
return len(set(_UpperCamelCase ) ) == len(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
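# Editor's sketch (an assumed variant, not in the original file): the
# set-length comparison above always consumes the whole list; an early-exit
# version stops at the first duplicate and also works for one-shot iterables.


def all_unique_early_exit(items) -> bool:
    seen = set()
    for item in items:
        if item in seen:
            return False
        seen.add(item)
    return True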
| 86 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # NOTE: attribute name reconstructed; the original name was lost in obfuscation
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")
        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")
        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 86 | 1 |
"""Autoformer model configuration."""

from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    """Configuration class for an Autoformer model."""

    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
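# Editor's sketch (values invented for illustration): Autoformer's decoder is
# fed the last `label_length` known values as a "start token" section, so
# label_length is expected to be smaller than context_length.
#
#     config = AutoformerConfig(
#         prediction_length=24,
#         context_length=48,
#         label_length=10,
#         moving_average=25,
#     )
#     assert config.label_length < config.context_length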
| 339 |
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Parameters
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """
    Get images list and annotations list from input dir, flip them,
    and save the new images and annotations in the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """
    Read YOLO-format label files from `label_dir` and pair them with images
    in `img_dir`. Return the list of image paths and the list of box lists.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """
    Flip each image (0 is vertical, 1 is horizontal) and mirror its
    normalized YOLO box centers accordingly. Return new images, new
    annotations, and the original paths.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """
    Generate a random lowercase alphanumeric string, e.g.
    '7b7ad245cdff75241935e4dd860f3bad'.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 339 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :Union[str, Any] = True
for model_class in self.all_model_classes:
UpperCamelCase__ :List[str] = True
UpperCamelCase__ :List[Any] = False
UpperCamelCase__ :Optional[Any] = True
UpperCamelCase__ :List[Any] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
UpperCamelCase__ :int = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
UpperCamelCase__ :str = outputs.attentions
UpperCamelCase__ :Any = sum(self.model_tester.depths )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase__ :Optional[Any] = True
UpperCamelCase__ :Tuple = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
UpperCamelCase__ :Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
UpperCamelCase__ :int = outputs.attentions
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
# verify the first attentions (first block, first layer)
UpperCamelCase__ :str = (self.model_tester.image_size // 4) ** 2
UpperCamelCase__ :str = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
UpperCamelCase__ :Union[str, Any] = (self.model_tester.image_size // 32) ** 2
UpperCamelCase__ :List[str] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
UpperCamelCase__ :Union[str, Any] = len(UpperCamelCase_ )
# Check attention is always last and order is fine
UpperCamelCase__ :Union[str, Any] = True
UpperCamelCase__ :Tuple = True
UpperCamelCase__ :str = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
UpperCamelCase__ :Any = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertEqual(out_len + 1 , len(UpperCamelCase_ ) )
UpperCamelCase__ :Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
# verify the first attentions (first block, first layer)
UpperCamelCase__ :Any = (self.model_tester.image_size // 4) ** 2
UpperCamelCase__ :List[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
def check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :Optional[Any] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
UpperCamelCase__ :Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
UpperCamelCase__ :str = outputs.hidden_states
UpperCamelCase__ :Optional[Any] = self.model_tester.num_encoder_blocks
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Union[str, Any] = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ :str = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCamelCase__ , UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :List[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCamelCase_ ):
continue
UpperCamelCase__ :Tuple = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
UpperCamelCase__ :int = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
UpperCamelCase__ :Dict = model(**UpperCamelCase_ ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
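
    # --- Illustrative demo (added; not part of the test suite) ---------------
    # A randomly initialised SegFormer already exhibits the shape contract the
    # integration tests above assert on: logits come out at 1/4 of the input
    # resolution (512 / 4 = 128). Assumes SegformerConfig is imported at the
    # top of this test module, as is usual for these files.
    def _random_init_shape_demo(self):
        config = SegformerConfig(num_labels=10)  # small demo config, not a released checkpoint
        model = SegformerForSemanticSegmentation(config).eval()
        pixel_values = torch.rand(1, 3, 512, 512)
        with torch.no_grad():
            logits = model(pixel_values).logits
        return logits.shape  # torch.Size([1, 10, 128, 128])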
| 97 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
__UpperCamelCase : List[str] = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
__UpperCamelCase : List[str] = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
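

# Usage sketch (illustrative; this module uses relative imports, so exercise
# it through `datasets` rather than as a script): formatters are looked up
# via the alias table built above, e.g.
#
#   get_format_type_from_alias("np")                  # -> "numpy"
#   isinstance(get_formatter("pd"), PandasFormatter)  # -> True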
| 228 | 0 |
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
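

def constant_image_check() -> bool:
    # Illustrative sanity check (added; not in the original script): a constant
    # image is a fixed point of the bilateral filter away from the borders,
    # because every neighbourhood value equals the centre pixel.
    flat = np.full((9, 9), 0.5)
    out = bilateral_filter(flat, spatial_variance=1.0, intensity_variance=1.0, kernel_size=3)
    return bool(np.allclose(out[1:-1, 1:-1], 0.5))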
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 6 |
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
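

# Usage sketch (illustrative): this is how the `transformers-cli` entry point
# wires the subcommand in. Any small checkpoint name works; the call needs
# network access on first use.
#
#   parser = ArgumentParser("transformers CLI tool")
#   commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
#   DownloadCommand.register_subcommand(commands_parser)
#   args = parser.parse_args(["download", "sshleifer/tiny-gpt2"])
#   args.func(args).run()  # downloads model + tokenizer into the HF cache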
| 6 | 1 |
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_batched,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


# NOTE: class and method names below were obfuscated in the source and have
# been restored by the editor; the defaults (224x224, ImageNet statistics,
# BICUBIC) match the EfficientFormer image processor, so that name is assumed.
class EfficientFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size_dict = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size_dict, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 89 |
def solution() -> int:
    """Project Euler 40: product of the 1st, 10th, 100th, ..., 1,000,000th digits
    of Champernowne's constant. Appending the numbers 1..10**6 gives more than
    enough digits, since most of them contribute several digits each."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
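

def _sanity_check() -> None:
    # Illustrative check (added; not part of the original solution): the
    # constant begins "1234567891011...", so the 10th digit is the leading 1
    # of the number 10.
    prefix = "".join(str(i) for i in range(1, 16))
    assert prefix.startswith("12345678910")
    assert int(prefix[9]) == 1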
if __name__ == "__main__":
print(solution())
| 187 | 0 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha**0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )
            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance  # _get_variance already returns the std in this mode
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
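

# Minimal denoising-loop sketch (illustrative; a real pipeline drives this
# scheduler with a trained prior/decoder network and passes `prev_timestep`
# explicitly between steps):
#
#   from diffusers import UnCLIPScheduler
#   scheduler = UnCLIPScheduler()
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       model_output = torch.zeros_like(sample)  # stand-in for a noise prediction
#       sample = scheduler.step(model_output, t, sample).prev_sample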
| 23 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
_a = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
return (preds == labels).mean()
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
UpperCAmelCase_ : Optional[Any] = simple_accuracy(__lowerCamelCase, __lowerCamelCase )
UpperCAmelCase_ : List[Any] = fa_score(y_true=__lowerCamelCase, y_pred=__lowerCamelCase )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
UpperCAmelCase_ : Any = pearsonr(__lowerCamelCase, __lowerCamelCase )[0]
UpperCAmelCase_ : Optional[Any] = spearmanr(__lowerCamelCase, __lowerCamelCase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
assert len(__lowerCamelCase ) == len(__lowerCamelCase ), f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "mrpc":
return acc_and_fa(__lowerCamelCase, __lowerCamelCase )
elif task_name == "sts-b":
return pearson_and_spearman(__lowerCamelCase, __lowerCamelCase )
elif task_name == "qqp":
return acc_and_fa(__lowerCamelCase, __lowerCamelCase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "rte":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "hans":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
else:
raise KeyError(__lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError(f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
else:
raise KeyError(__lowerCamelCase )
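

# Example (illustrative; needs scikit-learn installed and emits the
# deprecation warning by design):
#
#   import numpy as np
#   preds = np.array([1, 0, 1, 1])
#   labels = np.array([1, 0, 0, 1])
#   glue_compute_metrics("mrpc", preds, labels)
#   # -> {"acc": 0.75, "f1": 0.8, "acc_and_f1": 0.775}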
| 23 | 1 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all contiguous character n-grams of a given size from a sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
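

def demo() -> None:
    """Illustrative usage (added): n-grams here are raw character windows.

    >>> create_ngram("hello", 3)
    ['hel', 'ell', 'llo']
    """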
if __name__ == "__main__":
from doctest import testmod
testmod()
| 2 |
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
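

# Usage sketch (illustrative): the attribute_map defined above forwards the
# canonical names to MVP's own parameters, e.g.
#
#   cfg = MvpConfig(d_model=128)
#   cfg.hidden_size            # 128
#   cfg.num_attention_heads    # 16 (the default encoder_attention_heads)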
| 2 | 1 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )
    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
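

# Usage sketch (illustrative; needs network access on first call): the hub
# config's `image_processor_type` drives the resolution implemented above,
# e.g.
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   type(processor).__name__  # "ConvNextImageProcessor", per the mapping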
| 362 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : list[int] = []
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : List[Any] = sum(_lowerCamelCase )
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return result
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
if sum(_lowerCamelCase ) > max_sum or (remaining_nums_sum + sum(_lowerCamelCase )) < max_sum:
return
if sum(_lowerCamelCase ) == max_sum:
result.append(_lowerCamelCase )
return
for index in range(_lowerCamelCase , len(_lowerCamelCase ) ):
create_state_space_tree(
_lowerCamelCase , _lowerCamelCase , index + 1 , [*path, nums[index]] , _lowerCamelCase , remaining_nums_sum - nums[index] , )
A_ : Optional[Any] = [3, 34, 4, 12, 5, 2]
A_ : List[str] = 9
A_ : List[Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 316 | 0 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
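

# Illustrative arithmetic (added): with all defaults (no static/dynamic/time
# features), _number_of_features = 0 + 0 + 0 + 0 + 2 * input_size = 2, so
# feature_size = input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.
#
#   cfg = AutoformerConfig(prediction_length=24)
#   cfg.feature_size  # 9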
| 339 |
import argparse
import hashlib
import io
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    # `root` is given a default so the one-argument call in
    # convert_openai_whisper_to_tfms below works.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # a bare model name: fetch the official checkpoint bytes and load them
        # (the BytesIO wrap is an editor fix so the downloaded bytes load)
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
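
# Example invocation (illustrative):
#
#   python <this script> --checkpoint_path tiny.en \
#       --pytorch_dump_folder_path ./whisper-tiny.en
#
# A bare model name (no ".pt" suffix) is resolved through _MODELS and the
# official checkpoint is downloaded and checksum-verified first.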
| 339 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        self.use_cache = use_cache

        super().__init__(**kwargs)
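

# Usage sketch (illustrative): the attribute_map above lets generic code read
# CTRL's sizes under the canonical names, e.g.
#
#   cfg = CTRLConfig(n_embd=512, n_layer=4)
#   cfg.hidden_size         # 512
#   cfg.num_hidden_layers   # 4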
| 351 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_distilbert'] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_distilbert'] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
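# Note: `_LazyModule` defers the heavy framework imports declared above until an
# attribute is first accessed. A simplified sketch of the idea (hypothetical,
# not the actual transformers implementation):
#
#   class _LazyModule(types.ModuleType):
#       def __getattr__(self, name):
#           submodule = self._class_to_module[name]
#           module = importlib.import_module("." + submodule, self.__name__)
#           return getattr(module, name)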
| 284 | 0 |
import math
import sys

import cv2
import numpy as np


def vec_gaussian( img: np.ndarray , variance: float ) -> np.ndarray:
    # For applying gaussian function for each element in matrix, i.e. elementwise
    # G(x) = exp(-x^2 / (2 * variance)) / sqrt(2 * pi * variance).
    sigma = math.sqrt(variance )
    cons = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )


def get_slice( img: np.ndarray , x: int , y: int , kernel_size: int ) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel( kernel_size: int , spatial_variance: float ) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , kernel_size ):
        for j in range(0 , kernel_size ):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(arr , spatial_variance )


def bilateral_filter( img: np.ndarray , spatial_variance: float , intensity_variance: float , kernel_size: int , ) -> np.ndarray:
    img2 = np.zeros(img.shape )
    gauss_ker = get_gauss_kernel(kernel_size , spatial_variance )
    size_x , size_y = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            img_s = get_slice(img , i , j , kernel_size )
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i , intensity_variance )
            weights = np.multiply(gauss_ker , img_ig )
            vals = np.multiply(img_s , weights )
            val = np.sum(vals ) / np.sum(weights )
            img2[i, j] = val
    return img2


def parse_args( args: list ) -> tuple:
    filename = args[1] if args[1:] else '../image_data/lena.jpg'
    spatial_variance = float(args[2] ) if args[2:] else 1.0
    intensity_variance = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4] )
        kernel_size = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename , spatial_variance , intensity_variance , kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow('input image', img)
    out = img / 2_5_5
    out = out.astype('float32')
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 2_5_5
    out = np.uint8(out)
    cv2.imshow('output image', out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
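# Quick self-contained check (no image file needed, values illustrative): a
# single bright pixel in a flat region is smoothed towards its neighbourhood,
# but less aggressively than a plain Gaussian blur, because the intensity term
# down-weights dissimilar pixels.
#   test = np.full((9, 9), 0.5, dtype="float32")
#   test[4, 4] = 1.0
#   print(bilateral_filter(test, 1.0, 1.0, 5)[4, 4])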
| 6 |
def neville_interpolate( x_points: list , y_points: list , x0: float ) -> list:
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
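# Usage sketch: Neville's scheme reproduces any polynomial of degree <= n - 1
# exactly, so interpolating f(x) = x**2 at x0 = 2.5 from four samples gives
# exactly 6.25.
#   value, table = neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 2.5)
#   print(value)  # 6.25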
| 6 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
END_COMMON = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key( k , patterns ):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
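# Illustrative walk-through (hypothetical TF key): with DECODER_PATTERNS,
#   "pegasus/decoder/layer_0/attention/self/query/kernel"
# becomes, via "/"->".", "layer_"->"layers.", "kernel"->"weight",
# "pegasus"->"model", "attention.self"->"self_attn" and "query"->"q_proj",
#   "model.decoder.layers.0.self_attn.q_proj.weight".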
def convert_bigbird_pegasus( tf_weights , config_update ):
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
    for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict:
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
    for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
    # The TF checkpoint shares one positional embedding between encoder and decoder.
    mapping['model.encoder.embed_positions.weight'] = mapping['model.embed_positions.weight']
    mapping['model.decoder.embed_positions.weight'] = mapping.pop('model.embed_positions.weight' )
    missing , extra = torch_model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            'final_logits_bias',
            'model.encoder.embed_tokens.weight',
            'model.decoder.embed_tokens.weight',
            'lm_head.weight',
        ]
    ]
    assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], f'''no matches found for the following tf keys {extra}'''
    return torch_model
def get_tf_weights_as_numpy( path ):
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['global_step']
    for name, shape in tqdm(init_vars , desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch( ckpt_path , save_dir , config_update ):
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights , config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 197 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_roberta_prelayernorm"""] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_roberta_prelayernorm"""] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_roberta_prelayernorm"""] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 197 | 1 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
UpperCamelCase__: str = None
UpperCamelCase__: int = {
"7B": 11008,
"13B": 13824,
"30B": 17920,
"65B": 22016,
"70B": 28672,
}
UpperCamelCase__: List[Any] = {
"7B": 1,
"7Bf": 1,
"13B": 2,
"13Bf": 2,
"30B": 4,
"65B": 8,
"70B": 8,
"70Bf": 8,
}
def compute_intermediate_size( n , ffn_dim_multiplier=1 , multiple_of=256 ):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
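# Worked example: for the 7B model, n = dim = 4096 with the defaults gives
# int(8 * 4096 / 3) = 10922, rounded up to the next multiple of 256 -> 11008,
# matching the "7B" entry in the intermediate-size table above.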
def read_json( path ):
    with open(path , '''r''' ) as f:
        return json.load(f )
def write_json( text , path ):
    with open(path , '''w''' ) as f:
        json.dump(text , f )
def write_model( model_path , input_base_path , model_size , safe_serialization=True ):
    os.makedirs(model_path , exist_ok=True )
    tmp_model_path = os.path.join(model_path , '''tmp''' )
    os.makedirs(tmp_model_path , exist_ok=True )
    params = read_json(os.path.join(input_base_path , '''params.json''' ) )
    num_shards = NUM_SHARDS[model_size]
    n_layers = params['''n_layers''']
    n_heads = params['''n_heads''']
    n_heads_per_shard = n_heads // num_shards
    dim = params['''dim''']
    dims_per_head = dim // n_heads
    base = 1_0_0_0_0.0
    inv_freq = 1.0 / (base ** (torch.arange(0 , dims_per_head , 2 ).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params['''n_kv_heads'''] # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else: # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim
# permute for sliced rotary
    def permute(w , n_heads=n_heads , dim1=dim , dim2=dim ):
        return w.view(n_heads , dim1 // n_heads // 2 , 2 , dim2 ).transpose(1 , 2 ).reshape(dim1 , dim2 )
print(f"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
UpperCAmelCase : int = torch.load(os.path.join(_lowerCAmelCase , '''consolidated.00.pth''' ) , map_location='''cpu''' )
else:
# Sharded
UpperCAmelCase : Optional[Any] = [
torch.load(os.path.join(_lowerCAmelCase , f"""consolidated.{i:02d}.pth""" ) , map_location='''cpu''' )
for i in range(_lowerCAmelCase )
]
UpperCAmelCase : Any = 0
UpperCAmelCase : str = {'''weight_map''': {}}
    for layer_i in range(n_layers ):
        filename = f"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
UpperCAmelCase : Optional[int] = {
f"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wq.weight"""] ),
f"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wk.weight"""] ),
f"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[f"""layers.{layer_i}.attention.wv.weight"""],
f"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[f"""layers.{layer_i}.attention.wo.weight"""],
f"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w1.weight"""],
f"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w2.weight"""],
f"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w3.weight"""],
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[f"""layers.{layer_i}.attention_norm.weight"""],
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[f"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
UpperCAmelCase : List[str] = {
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.attention_norm.weight"""
].clone(),
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
UpperCAmelCase : Union[str, Any] = permute(
torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wq.weight"""].view(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
for i in range(_lowerCAmelCase )
] , dim=0 , ).reshape(_lowerCAmelCase , _lowerCAmelCase ) )
UpperCAmelCase : Optional[Any] = permute(
torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wk.weight"""].view(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
for i in range(_lowerCAmelCase )
] , dim=0 , ).reshape(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , )
UpperCAmelCase : str = torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wv.weight"""].view(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
for i in range(_lowerCAmelCase )
] , dim=0 , ).reshape(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : Optional[int] = torch.cat(
[loaded[i][f"""layers.{layer_i}.attention.wo.weight"""] for i in range(_lowerCAmelCase )] , dim=1 )
UpperCAmelCase : Any = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(_lowerCAmelCase )] , dim=0 )
UpperCAmelCase : str = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(_lowerCAmelCase )] , dim=1 )
UpperCAmelCase : Tuple = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(_lowerCAmelCase )] , dim=0 )
UpperCAmelCase : Any = inv_freq
        for k, v in state_dict.items():
            index_dict['''weight_map'''][k] = filename
            param_count += v.numel()
        torch.save(state_dict , os.path.join(tmp_model_path , filename ) )
UpperCAmelCase : Optional[int] = f"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
UpperCAmelCase : str = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
UpperCAmelCase : Any = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(_lowerCAmelCase )] , dim=1 ),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(_lowerCAmelCase )] , dim=0 ),
}
for k, v in state_dict.items():
UpperCAmelCase : Optional[int] = filename
param_count += v.numel()
torch.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
# Write configs
    index_dict['''metadata'''] = {'''total_size''': param_count * 2}
    write_json(index_dict , os.path.join(tmp_model_path , '''pytorch_model.bin.index.json''' ) )
    ffn_dim_multiplier = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
    multiple_of = params['''multiple_of'''] if '''multiple_of''' in params else 256
    config = LlamaConfig(
        hidden_size=dim , intermediate_size=compute_intermediate_size(dim , ffn_dim_multiplier , multiple_of ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=num_key_value_heads , )
    config.save_pretrained(tmp_model_path )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
    model = LlamaForCausalLM.from_pretrained(tmp_model_path , torch_dtype=torch.float16 , low_cpu_mem_usage=True )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''' )
    model.save_pretrained(model_path , safe_serialization=safe_serialization )
    shutil.rmtree(tmp_model_path )
def write_tokenizer( tokenizer_path , input_tokenizer_path ):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
    tokenizer = tokenizer_class(input_tokenizer_path )
    tokenizer.save_pretrained(tokenizer_path )
def main():
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--input_dir''' , help='''Location of LLaMA weights, which contains tokenizer.model and model folders''' , )
parser.add_argument(
'''--model_size''' , choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''] , )
parser.add_argument(
'''--output_dir''' , help='''Location to write HF model and tokenizer''' , )
    parser.add_argument('''--safe_serialization''' , type=bool , help='''Whether or not to save using `safetensors`.''' )
    args = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
    spm_path = os.path.join(args.input_dir , '''tokenizer.model''' )
    write_tokenizer(args.output_dir , spm_path )
if __name__ == "__main__":
main()
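# Example invocation (hypothetical local paths):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama --model_size 7B --output_dir ./llama-7b-hf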
| 23 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__: Optional[int] = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
    config = DPTConfig(embedding_type='''hybrid''' )
if "large" in checkpoint_url:
UpperCAmelCase : Tuple = 1024
UpperCAmelCase : List[Any] = 4096
UpperCAmelCase : str = 24
UpperCAmelCase : List[Any] = 16
UpperCAmelCase : str = [5, 11, 17, 23]
UpperCAmelCase : List[Any] = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
UpperCAmelCase : Optional[Any] = 768
UpperCAmelCase : Tuple = [1, 1, 1, 0.5]
UpperCAmelCase : int = [256, 512, 768, 768]
UpperCAmelCase : Any = 150
UpperCAmelCase : Tuple = 16
        expected_shape = (1, 384, 384)
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : Tuple = '''project'''
if "ade" in checkpoint_url:
UpperCAmelCase : Any = True
UpperCAmelCase : str = 768
UpperCAmelCase : Optional[int] = [1, 1, 1, 0.5]
UpperCAmelCase : List[Any] = 150
UpperCAmelCase : List[Any] = 16
        repo_id = '''huggingface/label-files'''
        filename = '''ade20k-id2label.json'''
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
return config, expected_shape
def remove_ignore_keys_( state_dict ):
    ignore_keys = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( name ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCAmelCase : Tuple = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
UpperCAmelCase : Union[str, Any] = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
UpperCAmelCase : int = name.replace('''patch_embed''' , '''''' )
if "pos_embed" in name:
UpperCAmelCase : Tuple = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
UpperCAmelCase : Any = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
UpperCAmelCase : str = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
UpperCAmelCase : Any = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
UpperCAmelCase : Optional[int] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase : Optional[Any] = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name and "backbone" not in name:
UpperCAmelCase : Dict = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name and "backbone" not in name:
UpperCAmelCase : Tuple = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
UpperCAmelCase : Tuple = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
UpperCAmelCase : str = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
UpperCAmelCase : Dict = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
UpperCAmelCase : int = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
UpperCAmelCase : Tuple = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
UpperCAmelCase : int = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
        layer_idx = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
UpperCAmelCase : List[str] = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
UpperCAmelCase : Union[str, Any] = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
UpperCAmelCase : Any = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
UpperCAmelCase : Optional[int] = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
UpperCAmelCase : Tuple = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCAmelCase : Dict = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCAmelCase : int = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCAmelCase : Any = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCAmelCase : Optional[Any] = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCAmelCase : List[Any] = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
UpperCAmelCase : Any = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
UpperCAmelCase : Optional[int] = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
UpperCAmelCase : str = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
UpperCAmelCase : List[str] = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
UpperCAmelCase : Tuple = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
UpperCAmelCase : int = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
UpperCAmelCase : Optional[int] = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
UpperCAmelCase : Dict = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
UpperCAmelCase : Any = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
UpperCAmelCase : Optional[int] = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
UpperCAmelCase : Union[str, Any] = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
if "backbone" in name:
UpperCAmelCase : List[Any] = name.replace('''backbone''' , '''backbone.bit.encoder''' )
if ".." in name:
UpperCAmelCase : Optional[int] = name.replace('''..''' , '''.''' )
if "stem.conv" in name:
UpperCAmelCase : Optional[Any] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
UpperCAmelCase : Optional[int] = name.replace('''blocks''' , '''layers''' )
if "convolution" in name and "backbone" in name:
UpperCAmelCase : List[Any] = name.replace('''convolution''' , '''conv''' )
if "layer" in name and "backbone" in name:
UpperCAmelCase : List[str] = name.replace('''layer''' , '''layers''' )
if "backbone.bit.encoder.bit" in name:
UpperCAmelCase : List[Any] = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' )
if "embedder.conv" in name:
UpperCAmelCase : List[Any] = name.replace('''embedder.conv''' , '''embedder.convolution''' )
if "backbone.bit.encoder.stem.norm" in name:
UpperCAmelCase : Tuple = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' )
return name
def read_in_q_k_v( state_dict , config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
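        # Background: the timm checkpoint stores Q, K and V as one fused
        # (3 * hidden_size, hidden_size) matrix; the three equal slices above
        # are, in order, the query, key and value projections.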
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ):
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location='''cpu''' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if '''ade''' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors='''pt''' )
    # forward pass
    outputs = model(**encoding ).logits if '''ade''' in checkpoint_url else model(**encoding ).predicted_depth
if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=False , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"""Saving model to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
model.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 23 | 1 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : Union[str, Any] = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch( bort_checkpoint_path , pytorch_dump_folder_path ):
_UpperCAmelCase : Union[str, Any] = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
_UpperCAmelCase : List[Any] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
_UpperCAmelCase : Optional[Any] = BERTEncoder(
attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=lowerCAmelCase_ , output_all_encodings=lowerCAmelCase_ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCAmelCase_ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
_UpperCAmelCase : Dict = """openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
_UpperCAmelCase : Optional[int] = os.path.join(get_home_dir() , """models""" )
_UpperCAmelCase : Optional[Any] = _load_vocab(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , cls=lowerCAmelCase_ )
_UpperCAmelCase : Optional[int] = nlp.model.BERTModel(
lowerCAmelCase_ , len(lowerCAmelCase_ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCAmelCase_ , use_token_type_embed=lowerCAmelCase_ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCAmelCase_ , use_decoder=lowerCAmelCase_ , )
original_bort.load_parameters(lowerCAmelCase_ , cast_dtype=lowerCAmelCase_ , ignore_extra=lowerCAmelCase_ )
_UpperCAmelCase : str = original_bort._collect_params_with_prefix()
# Build our config 🤗
_UpperCAmelCase : Tuple = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(lowerCAmelCase_ ),
}
_UpperCAmelCase : List[str] = BertConfig.from_dict(lowerCAmelCase_ )
_UpperCAmelCase : int = BertForMaskedLM(lowerCAmelCase_ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(lowerCAmelCase_ ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Optional[Any] = hf_param.shape
_UpperCAmelCase : str = to_torch(params[gluon_param] )
_UpperCAmelCase : Optional[int] = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
return gluon_param
_UpperCAmelCase : List[Any] = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" )
_UpperCAmelCase : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" )
_UpperCAmelCase : Optional[int] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" )
_UpperCAmelCase : Dict = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
_UpperCAmelCase : str = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
_UpperCAmelCase : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
_UpperCAmelCase : BertSelfAttention = layer.attention.self
_UpperCAmelCase : str = check_and_map_params(
self_attn.key.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
_UpperCAmelCase : str = check_and_map_params(
self_attn.key.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
_UpperCAmelCase : Tuple = check_and_map_params(
self_attn.query.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
_UpperCAmelCase : str = check_and_map_params(
self_attn.query.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
_UpperCAmelCase : Optional[Any] = check_and_map_params(
self_attn.value.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
_UpperCAmelCase : int = check_and_map_params(
self_attn.value.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
# self attention output
_UpperCAmelCase : BertSelfOutput = layer.attention.output
_UpperCAmelCase : Any = check_and_map_params(
self_output.dense.bias , f"encoder.transformer_cells.{i}.proj.bias" )
_UpperCAmelCase : List[Any] = check_and_map_params(
self_output.dense.weight , f"encoder.transformer_cells.{i}.proj.weight" )
_UpperCAmelCase : List[Any] = check_and_map_params(
self_output.LayerNorm.bias , f"encoder.transformer_cells.{i}.layer_norm.beta" )
_UpperCAmelCase : Tuple = check_and_map_params(
self_output.LayerNorm.weight , f"encoder.transformer_cells.{i}.layer_norm.gamma" )
# intermediate
_UpperCAmelCase : BertIntermediate = layer.intermediate
_UpperCAmelCase : Dict = check_and_map_params(
intermediate.dense.bias , f"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
_UpperCAmelCase : Optional[int] = check_and_map_params(
intermediate.dense.weight , f"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
# output
_UpperCAmelCase : BertOutput = layer.output
_UpperCAmelCase : str = check_and_map_params(
bert_output.dense.bias , f"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
_UpperCAmelCase : Union[str, Any] = check_and_map_params(
bert_output.dense.weight , f"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
_UpperCAmelCase : Optional[Any] = check_and_map_params(
bert_output.LayerNorm.bias , f"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
_UpperCAmelCase : Dict = check_and_map_params(
bert_output.LayerNorm.weight , f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
_UpperCAmelCase : int = RobertaTokenizer.from_pretrained("""roberta-base""" )
_UpperCAmelCase : Any = tokenizer.encode_plus(lowerCAmelCase_ )["""input_ids"""]
# Get gluon output
_UpperCAmelCase : List[str] = mx.nd.array([input_ids] )
_UpperCAmelCase : Any = original_bort(inputs=lowerCAmelCase_ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowerCAmelCase_ )
_UpperCAmelCase : Any = BertModel.from_pretrained(lowerCAmelCase_ )
hf_bort_model.eval()
_UpperCAmelCase : Dict = tokenizer.encode_plus(lowerCAmelCase_ , return_tensors="""pt""" )
_UpperCAmelCase : Any = hf_bort_model(**lowerCAmelCase_ )[0]
_UpperCAmelCase : Optional[int] = output_gluon[0].asnumpy()
_UpperCAmelCase : Optional[int] = output_hf[0].detach().numpy()
_UpperCAmelCase : Dict = np.max(np.abs(hf_layer - gluon_layer ) ).item()
_UpperCAmelCase : Dict = np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 )
    if success:
        print("""✔️ Both models output the same tensors""" )
    else:
        print("""❌ The models do **NOT** output the same tensors""" )
        print("""Absolute difference is:""" , lowerCAmelCase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path to the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
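# Example invocation (hypothetical paths). The script round-trips the sample
# sentence through the original GluonNLP model and the converted PyTorch model
# and accepts the conversion when the outputs agree within atol=1e-3:
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params --pytorch_dump_folder_path ./bort-hf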
| 170 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp( tf.keras.optimizers.schedules.LearningRateSchedule ):
    def __init__(self , initial_learning_rate , decay_schedule_fn , warmup_steps , power = 1.0 , name = None , ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__(self , step ):
        with tf.name_scope(self.name or """WarmUp""" ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )
    def get_config (self ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer( init_lr , num_train_steps , num_warmup_steps , min_lr_ratio = 0.0 , adam_beta1 = 0.9 , adam_beta2 = 0.999 , adam_epsilon = 1e-8 , adam_clipnorm = None , adam_global_clipnorm = None , weight_decay_rate = 0.0 , power = 1.0 , include_in_weight_decay = None , ):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
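# Usage sketch (names as defined in this module): a 5e-5 peak learning rate
# with 100 warmup steps, polynomial decay over the remaining 900 steps, and
# decoupled weight decay on everything except LayerNorm and bias parameters.
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5, num_train_steps=1000, num_warmup_steps=100, weight_decay_rate=0.01,
#   )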
class AdamWeightDecay( Adam ):
    def __init__(self , learning_rate = 0.0_0_1 , beta_1 = 0.9 , beta_2 = 0.9_9_9 , epsilon = 1e-7 , amsgrad = False , weight_decay_rate = 0.0 , include_in_weight_decay = None , exclude_from_weight_decay = None , name = "AdamWeightDecay" , **kwargs , ):
        super().__init__(learning_rate , beta_1 , beta_2 , epsilon , amsgrad , name , **kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
    def from_config (cls , config ):
        custom_objects = {"""WarmUp""": WarmUp}
        return super(AdamWeightDecay , cls ).from_config(config , custom_objects=custom_objects )
    def _prepare_local (self , var_device , var_dtype , apply_state ):
        super(AdamWeightDecay , self )._prepare_local(var_device , var_dtype , apply_state )
        apply_state[(var_device, var_dtype)]["""weight_decay_rate"""] = tf.constant(
            self.weight_decay_rate , name="""adam_weight_decay_rate""" )
    def _decay_weights_op (self , var , learning_rate , apply_state ):
        do_decay = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , )
        return tf.no_op()
    def apply_gradients (self , grads_and_vars , name=None , **kwargs ):
        grads , tvars = list(zip(*grads_and_vars ) )
        return super(AdamWeightDecay , self ).apply_gradients(zip(grads , tvars ) , name=name , **kwargs )
    def _get_lr (self , var_device , var_dtype , apply_state ):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device , var_dtype )
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense (self , grad , var , apply_state=None ):
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_dense(grad , var , **kwargs )
    def _resource_apply_sparse (self , grad , var , indices , apply_state=None ):
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_sparse(grad , var , indices , **kwargs )
    def get_config (self ):
        config = super().get_config()
        config.update({"""weight_decay_rate""": self.weight_decay_rate} )
        return config
    def _do_use_weight_decay (self , param_name ):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r , param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r , param_name ) is not None:
                    return False
        return True
class GradientAccumulator(object ):
    def __init__(self ):
        self._gradients = []
        self._accum_steps = None
@property
    def step (self ):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
        return self._accum_steps.value()
@property
def snake_case_ (self ):
if not self._gradients:
raise ValueError("""The accumulator should be called first to initialize the gradients""" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__(self , lowerCAmelCase__ ):
if not self._gradients:
_UpperCAmelCase : Optional[int] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowerCAmelCase__ ) , trainable=lowerCAmelCase__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(lowerCAmelCase__ ) != len(self._gradients ):
raise ValueError(F"Expected {len(self._gradients )} gradients, but got {len(lowerCAmelCase__ )}" )
for accum_gradient, gradient in zip(self._gradients , lowerCAmelCase__ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowerCAmelCase__ )
self._accum_steps.assign_add(1 )
def snake_case_ (self ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowerCAmelCase__ ) )
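# Hedged usage sketch (not part of the original file): `optimizer`, `variables`,
# `batches` and `compute_loss` below are illustrative placeholders.
#
#   accumulator = GradientAccumulator()
#   for batch in batches:
#       with tf.GradientTape() as tape:
#           loss = compute_loss(batch)
#       accumulator(tape.gradient(loss, variables))
#   optimizer.apply_gradients(zip(accumulator.gradients, variables))
#   accumulator.reset()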
| 170 | 1 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile(r"[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet, or None if it has too few tokens."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset in two steps: MinHashing, then LSH querying."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
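# Worked example (illustrative): with code1 = "a = 1" and code2 = "a = 2" the
# token sets are {"a", "1"} and {"a", "2"}, so jaccard_similarity returns
# |{"a"}| / |{"a", "1", "2"}| = 1 / 3.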
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
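# Hedged usage sketch (not part of the original script): `ds` is a hypothetical
# `datasets.Dataset` with "content", "repo_name" and "path" columns.
#
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)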
| 70 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    parts = s.rsplit(old, occurrence)
    return new.join(parts)
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 99 |
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
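# Worked example (illustrative): binary_or(25, 32) pads 0b11001 and 0b100000 to
# the same width and ORs them column-wise, giving "0b111001" (decimal 57).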
| 99 | 1 |
"""simple docstring"""
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 16 |
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
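# Worked example (illustrative): for p = 7, m = 2**7 - 1 = 127 and the sequence
# s runs 4 -> 14 -> 67 -> 42 -> 111 -> 0 (mod 127); it ends at 0, so 127 is prime.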
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 284 | 0 |
"""simple docstring"""
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1  # advance to the next candidate suffix (guards against an infinite loop)
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
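# Hedged usage sketch (the subclass and values are illustrative, not part of
# this module):
#
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#
#   RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 32})
#   # -> e.g. "run_lr0.0001" (the default batch_size is omitted from the name)
#   RunNamer.parse_repr("run_lr0.0001")
#   # -> {"learning_rate": 0.0001, "batch_size": 32}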
| 363 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 57 | 0 |
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
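# Small numeric check (illustrative): maxpooling over a 4x4 ramp with size=2
# and stride=2 keeps the maximum of each 2x2 block.
#
#   maxpooling(np.arange(16).reshape(4, 4), size=2, stride=2)
#   # -> [[ 5.,  7.],
#   #     [13., 15.]]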
| 197 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 197 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n >>> pipe.to(\"cuda\")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save(\"cat.png\")\n ```\n"
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
    ):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method

            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])

            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 359 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
_CITATION = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    'meteor': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric('meteor')\n    >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n    >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results[\"meteor\"], 4))\n    0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 180 | 0 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 170 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
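# Hedged usage sketch (argument values other than the methods above are
# illustrative placeholders):
#
#   trainer = QuestionAnsweringTrainer(
#       model=model, args=training_args, eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       quant_trainer_args=quant_trainer_args,
#   )
#   trainer.calibrate()           # post-training calibration pass
#   metrics = trainer.evaluate()  # evaluation with QA post-processing
#   trainer.save_onnx("./onnx")   # export the calibrated model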
| 170 | 1 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 304 |
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__('''doctest''').testmod()
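# Quick sanity checks (illustrative): sin(90) -> 1.0 and sin(30) -> 0.5 up to
# the rounding applied above.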
| 304 | 1 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
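# Illustrative one-off call: rabin_karp("abc", "xxabcxx") returns True because
# the rolling window hash matches at index 2 and the literal comparison confirms it.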
| 99 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
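# fmt: on
# The two id lists above mirror Whisper's default suppressed non-speech tokens
# (English-only checkpoints vs. multilingual ones); suppressing them keeps
# generation from emitting punctuation-like and music/noise symbols.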
class WhisperConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=5_1865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=5_0257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=5_0256, bos_token_id=5_0256, eos_token_id=5_0256, suppress_tokens=None, begin_suppress_tokens=[220, 5_0256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs, ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs, )
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ])
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs

    def generate_dummy_inputs(self, preprocessor, batch_size = -1, seq_length = -1, is_pair = False, framework = None, sampling_rate = 2_2050, time_duration = 5.0, frequency = 220, ) -> Mapping[str, Any]:
        """simple docstring"""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)
        dummy_inputs['input_features'] = encoder_inputs.pop('input_features')
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids')
        if "past_key_values" in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values')
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-3
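# Minimal usage sketch (assumes a Whisper processor/checkpoint is available locally):
#   config = WhisperConfig()
#   onnx_config = WhisperOnnxConfig(config)
#   onnx_config.inputs  # -> OrderedDict of dynamic ONNX axes per input tensor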
| 99 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
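# Fast CPU tests for the Kandinsky 2.2 prior pipeline, which turns a text
# prompt into CLIP image embeddings that condition the decoder pipeline.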
class KandinskyVaaPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(config )

    @property
    def dummy_prior(self):
        torch.manual_seed(0 )
        model_kwargs = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 12,
            '''embedding_dim''': self.text_embedder_hidden_size,
            '''num_layers''': 1,
        }
        model = PriorTransformer(**model_kwargs )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        model = CLIPVisionModelWithProjection(config )
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=True , clip_sample_range=10.0 , )
        components = {
            '''prior''': prior,
            '''image_encoder''': image_encoder,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''scheduler''': scheduler,
            '''image_processor''': image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''guidance_scale''': 4.0,
            '''num_inference_steps''': 2,
            '''output_type''': '''np''',
        }
        return inputs

    def test_kandinsky_prior(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == '''cpu'''
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )
| 15 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
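# Expected parse tree produced by ReadMe for README_CORRECT below.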
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''''''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
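# The string-based cases below exercise ReadMe.from_string; the file-based
# variants further down mirror them through a temporary README.md on disk.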
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_string_correct(readme_md, expected_dict):
    """simple docstring"""
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    """simple docstring"""
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='''root'''))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    """simple docstring"""
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='''root''') ) ):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    """simple docstring"""
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_readme_correct(readme_md, expected_dict):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / '''README.md'''
        with open(path, '''w+''' ) as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_readme_error(readme_md, expected_error):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / '''README.md'''
        with open(path, '''w+''' ) as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error) ):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / '''README.md'''
        with open(path, '''w+''' ) as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error) ):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / '''README.md'''
        with open(path, '''w+''' ) as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 15 | 1 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(4_2)
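# The plugin tests below run in a single process and fake a distributed setup
# via mocked environment variables; the integration tests further down launch
# real multi-GPU `accelerate launch` jobs.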
_SCREAMING_SNAKE_CASE = "bert-base-cased"
_SCREAMING_SNAKE_CASE = "fp16"
_SCREAMING_SNAKE_CASE = "bf16"
_SCREAMING_SNAKE_CASE = [FPaa, BFaa]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        """simple docstring"""
        super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true", MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1", )
    def test_sharding_strategy(self):
        """simple docstring"""
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"""{i + 1}"""
            # NOTE: the second env-var name was lost in extraction; this key is a best guess
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1 ) )
    def test_backward_prefetch(self):
        """simple docstring"""
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch )
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1 ) )
    def test_state_dict_type(self):
        """simple docstring"""
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1 ) )
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only )
    def test_auto_wrap_policy(self):
        """simple docstring"""
        model = AutoModel.from_pretrained(BERT_BASE_CASED )
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = """BertLayer"""
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = """2000"""
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model )
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy )
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = """TRANSFORMER_BASED_WRAP"""
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = """T5Layer"""
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            # the original exception type was lost in extraction; Exception is a safe superset
            with self.assertRaises(Exception ) as cm:
                fsdp_plugin.set_auto_wrap_policy(model )
            self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = """SIZE_BASED_WRAP"""
        env["FSDP_MIN_NUM_PARAMS"] = """0"""
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model )
            self.assertIsNone(fsdp_plugin.auto_wrap_policy )
    def test_mixed_precision(self):
        """simple docstring"""
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype )
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy )
                if mp_dtype == FPaa:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler ) )
                elif mp_dtype == BFaa:
                    self.assertIsNone(accelerator.scaler )
                AcceleratorState._reset_state(True )
    def test_cpu_offload(self):
        """simple docstring"""
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag ).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag ) )
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        """simple docstring"""
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            """fsdp_shard_grad_op_transformer_based_wrap""",
            """fsdp_full_shard_transformer_based_wrap""",
        ]
        self.peak_memory_usage_upper_bound = {
            """multi_gpu_fp16""": 3200,
            """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000,
            """fsdp_full_shard_transformer_based_wrap_fp16""": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
    def test_performance(self):
"""simple docstring"""
UpperCamelCase = os.path.join(self.test_scripts_folder , """test_performance.py""" )
UpperCamelCase = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in config:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
    def test_checkpointing(self):
"""simple docstring"""
UpperCamelCase = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
        cmd = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
            cmd_config = cmd.copy()
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
            state_dict_config_index = len(cmd_config )
for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config , env=os.environ.copy() )
                cmd_config = cmd_config[:-1]
UpperCamelCase = os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
f"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config , env=os.environ.copy() )
    def test_peak_memory_usage(self):
"""simple docstring"""
UpperCamelCase = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
        cmd = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in spec:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
f"""--n_train={self.n_train}""",
f"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
| 343 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
A : Any = "examples/"
REPLACE_PATTERNS = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
A : List[Any] = "README.md"
def update_version_in_file(fname, version, pattern):
    '''simple docstring'''
    with open(fname, "r", encoding="utf-8", newline="\n" ) as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version )
    code = re_pattern.sub(replace, code )
    with open(fname, "w", encoding="utf-8", newline="\n" ) as f:
        f.write(code )
def update_version_in_examples(version):
    '''simple docstring'''
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects" )
        if "legacy" in directories:
            directories.remove("legacy" )
        for fname in fnames:
            if fname.endswith(".py" ):
                update_version_in_file(os.path.join(folder, fname ), version, pattern="examples" )
def global_version_update(version, patch=False):
    '''simple docstring'''
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list():
    '''simple docstring'''
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n" ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith("1." ):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n" ) as f:
        f.writelines(lines )
def get_version():
    '''simple docstring'''
    with open(REPLACE_FILES["init"] , "r" ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work(patch=False):
    '''simple docstring'''
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]" )
    if len(version ) == 0:
        version = default_version
    print(f"Updating version to {version}." )
    global_version_update(version , patch=patch )
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`." )
        clean_main_ref_in_model_list()
def post_release_work():
    '''simple docstring'''
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]" )
    if len(version ) == 0:
        version = dev_version
    print(f"Updating version to {version}." )
    global_version_update(version )
    print("Cleaning main README, don't forget to run `make fix-copies`." )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
A : Dict = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 57 | 0 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
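# Pipe-delimited tabulate format that renders Markdown-style tables cleanly in
# Slack and GitHub, without horizontal rule lines.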
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'''emoji''': True,
},
}
]
total_num_failed = 0
for log in Path().glob('''*.log'''):
    section_num_failed = 0
with open(log, '''r''') as f:
for line in f:
            line = json.loads(line)
if line.get('''nodeid''', '''''') != "":
                test = line["nodeid"]
if line.get('''duration''', None) is not None:
UpperCAmelCase = F"""{line["duration"]:.4f}"""
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
log.unlink()
message = ""
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
# Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3_000 - offset] + f"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
    message = "No failed tests! 🤗"
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
        md_report = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
    action_button = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
    date_report = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
            test_class = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
                    test_class = row[0]
else:
                    row[0] = ""
            payload = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 172 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
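# Maps each --lr_scheduler CLI choice onto the corresponding transformers
# schedule factory function.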
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f""" {self.model.__class__}"""
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                ''' padding..''' )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        '''simple docstring'''
        if self.optimizer is None:
            no_decay = ['''bias''', '''LayerNorm.weight''']
            optimizer_grouped_parameters = [
                {
                    '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    '''weight_decay''': self.args.weight_decay,
                },
                {
                    '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    '''weight_decay''': 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'''scale_parameter''': False, '''relative_step''': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    '''betas''': (self.args.adam_beta1, self.args.adam_beta2),
                    '''eps''': self.args.adam_epsilon,
                }
            optimizer_kwargs['''lr'''] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
    def _get_lr_scheduler(self, num_training_steps):
        '''simple docstring'''
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler(self):
'''simple docstring'''
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss(self, model, inputs, labels):
        '''simple docstring'''
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss, logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss, _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss(self, model, inputs):
        '''simple docstring'''
        labels = inputs.pop('''labels''' )
        loss, _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ):
        '''simple docstring'''
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            '''max_length''': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs['''max_length'''] )
        labels = inputs.pop('''labels''' )
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs['''max_length'''] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        '''simple docstring'''
        # If PAD token is not defined, at least the EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
                f""" padded to `max_length`={max_length}""" )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 172 | 1 |
'''simple docstring'''
from datetime import datetime
import requests
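# NOTE: relies on the third-party downloadgram.net resolver endpoint staying
# available and returning JSON of the form [{"urls": [{"src": ...}]}].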
def download_video(url: str) -> bytes:
    """Resolve an Instagram video/IGTV URL and return the raw video bytes."""
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    video_url = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
    return requests.get(video_url ).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"""Done. Video saved to disk as {file_name}.""")
| 93 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class WavaVecaPhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = WavaVecaPhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=False )) for i in range(len(tokenizer ) )]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1] , do_phonemize=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + """ """
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = """ """ + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map )
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
        # check adding a single token
        tokenizer.add_tokens("""xxx""" )
        token_ids = tokenizer("""m xxx ɪ""" , do_phonemize=False ).input_ids
        self.assertEqual(token_ids , [13, 392, 17] )  # xxx should be last token
        tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
        token_ids = tokenizer("""m aaa ɪ ccc""" , do_phonemize=False ).input_ids
        self.assertEqual(token_ids , [13, 393, 17, 395] )  # aaa and ccc should be after xxx and 2 after aaa
        token_ids = tokenizer("""maɪ c""" , do_phonemize=False ).input_ids
        self.assertEqual(token_ids , [3, 200] )  # mai should be <unk> (=3)
    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
        input_text = """Hello how are you"""
        phonemes = tokenizer.phonemize(input_text , phonemizer_lang="""en-us""" )
        self.assertEqual(phonemes , """h ə l oʊ h aʊ ɑːɹ j uː""" )
    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
        input_text = """Hello how are you"""
        phonemes = tokenizer.phonemize(input_text , phonemizer_lang="""en-us""" )
        self.assertEqual(tokenizer(input_text ).input_ids , tokenizer(phonemes , do_phonemize=False ).input_ids )
    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
        input_text = """Hello how are you"""
        phonemes = tokenizer.phonemize(input_text , phonemizer_lang="""en-us""" )
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text ).input_ids )
        self.assertEqual(phonemes , phonemes_enc_dec )
    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0] )
        batch_tokens = tokenizer.batch_decode(sample_ids )
        self.assertEqual(tokens , batch_tokens[0] )
        self.assertEqual(batch_tokens , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            """facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
        tokenizer.add_tokens("""|""" )
        input_text = """Hello how are you"""
        phonemes = tokenizer.phonemize(input_text , phonemizer_lang="""en-us""" )
        self.assertEqual(phonemes , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            """facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
        tokenizer.add_tokens("""|""" )
        input_text = """Hello how are you"""
        phonemes = tokenizer.phonemize(input_text , phonemizer_lang="""en-us""" )
        self.assertEqual(tokenizer(input_text ).input_ids , tokenizer(phonemes , do_phonemize=False ).input_ids )
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
_A = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
_A = tokenizer.decode(sample_ids[0] )
_A = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , batch_tokens[0] )
self.assertEqual(lowerCAmelCase_ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
_A = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowerCAmelCase_ )
_A = tokenizer.batch_decode(lowerCAmelCase_ , filter_word_delimiter_token=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , batch_tokens[0] )
self.assertEqual(lowerCAmelCase_ , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def UpperCAmelCase ( self ) -> Dict:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
_A = tokenizer.decode(tokenizer(lowerCAmelCase_ ).input_ids , filter_word_delimiter_token=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
_A = tokenizer.decode(tokenizer(lowerCAmelCase_ ).input_ids , filter_word_delimiter_token=lowerCAmelCase_ )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=lowerCAmelCase_ )
_A = """Hello how are you"""
_A = tokenizer(lowerCAmelCase_ , phonemizer_lang="""en-us""" ).input_ids
_A = tokenizer(lowerCAmelCase_ , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_A = tokenizer.decode(lowerCAmelCase_ )
_A = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(lowerCAmelCase_ , """ɛ l o h aʊ a ʁ j u""" )
def UpperCAmelCase ( self ) -> Any:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_A = """Hello how Are you"""
_A = """hello how are you"""
_A = tokenizer(lowerCAmelCase_ ).input_ids
_A = tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
_A = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
_A = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = [d[key] for d in offsets]
return retrieved_list
def UpperCAmelCase ( self ) -> Tuple:
_A = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
_A = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
_A = tokenizer.decode(lowerCAmelCase_ , output_char_offsets=lowerCAmelCase_ , filter_word_delimiter_token=lowerCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertTrue(isinstance(outputs_list[0] , lowerCAmelCase_ ) )
# transform list to ModelOutput
_A = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ):
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
[recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ) for la, la in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
_A = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
_A = tokenizer.batch_decode(lowerCAmelCase_ , output_char_offsets=lowerCAmelCase_ )
_A = [tokenizer.decode(lowerCAmelCase_ , output_char_offsets=lowerCAmelCase_ ) for ids in sample_ids]
check_list_tuples_equal(lowerCAmelCase_ , lowerCAmelCase_ )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def UpperCAmelCase ( self ) -> int:
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def UpperCAmelCase ( self ) -> List[str]:
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def UpperCAmelCase ( self ) -> Optional[int]:
pass
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = tokenizer.vocab_size
_A = len(lowerCAmelCase_ )
self.assertNotEqual(lowerCAmelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_A = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
_A = tokenizer.add_tokens(lowerCAmelCase_ )
_A = tokenizer.vocab_size
_A = len(lowerCAmelCase_ )
self.assertNotEqual(lowerCAmelCase_ , 0 )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
self.assertEqual(lowerCAmelCase_ , all_size + len(lowerCAmelCase_ ) )
_A = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=lowerCAmelCase_ )
self.assertGreaterEqual(len(lowerCAmelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_A = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
_A = tokenizer.add_special_tokens(lowerCAmelCase_ )
_A = tokenizer.vocab_size
_A = len(lowerCAmelCase_ )
self.assertNotEqual(lowerCAmelCase_ , 0 )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
self.assertEqual(lowerCAmelCase_ , all_size_a + len(lowerCAmelCase_ ) )
_A = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=lowerCAmelCase_ )
self.assertGreaterEqual(len(lowerCAmelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def UpperCAmelCase ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase ( self ) -> str:
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
_A = self.get_tokenizers(fast=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
_A = tokenizer.convert_tokens_to_string(lowerCAmelCase_ )
self.assertIsInstance(output["""text"""] , lowerCAmelCase_ )
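
# Standalone sketch of what the tests above exercise (my addition; only the
# checkpoint name and the expected phoneme string come from these tests):
# tokenizer = WavaVecaPhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
# tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us")
# -> "h ə l oʊ h aʊ ɑːɹ j uː"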
| 180 | 0 |
from math import ceil
def solution(n: int = 1001) -> int:
    """
    Returns the sum of the numbers on the diagonals in an n by n spiral
    (Project Euler problem 28).
    """
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
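
# Worked check of the ring update above (my annotation): for ring i the four
# diagonal corners are odd**2 - j * (odd - 1) for j in 0..3 with odd = 2*i + 1,
# so their sum is 4 * odd**2 - 6 * (odd - 1) = 4 * odd**2 - 6 * even.
# For the 3x3 ring (i = 1): 9 + 7 + 5 + 3 == 4 * 9 - 6 * 2 == 24.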
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 356 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 34 | 0 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Returns the relative distance for the point (x, y), i.e. the fraction of
    max_step iterations completed before the sequence diverges.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex numbers with an absolute value
        # greater than 2 (checked via a * a + b * b > 4)
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
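
# Quick sanity checks (my annotation): the origin never escapes, so the loop
# runs all max_step iterations and the distance is 1.0, while c = 1 escapes
# after two iterations:
# get_distance(0, 0, 50)  # -> 1.0 (inside the set)
# get_distance(1, 0, 50)  # -> 1 / 49 ~ 0.0204 (early escape)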
def get_black_and_white_rgb(distance: float) -> tuple:
    """
    Black & white color-coding: the Mandelbrot set is black, everything else white.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """
    Color-coding using the relative distance: the Mandelbrot set is black,
    everything else gets a hue from the HSV color space.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 304 |
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)
def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
| 304 | 1 |
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """
    Validate that the rows and columns of the grid are sorted in decreasing order.
    """
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """
    Find the index of the first negative number in a reverse-sorted array via
    binary search; returns len(array) if there is none.
    """
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
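
# Illustrative call (my annotation): in the reverse-sorted row [4, 3, 2, -1]
# the first negative value sits at index 3, so find_negative_index returns 3
# and the row contributes len(row) - 3 == 1 negative number.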
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """
    An O(m log(n)) solution that uses binary search to find the boundary
    between positive and negative numbers in each row.
    """
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """
    This solution is O(n^2) because it iterates through every column and row.
    """
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """
    Similar to the brute force solution above but uses break to reduce the
    number of iterations.
    """
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark our functions next to each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 101 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node runs.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """
    Set the random seed.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
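
# Minimal usage sketch (my example; the real training script builds `params`
# and `args` with argparse):
# from types import SimpleNamespace
# params = SimpleNamespace(n_gpu=0)
# init_gpu_params(params)   # CPU path: sets params.is_master=True, params.multi_gpu=False
# set_seed(SimpleNamespace(seed=56, n_gpu=0))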
| 101 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }

        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )

        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
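
# End-to-end sketch of the pipeline exercised above (my addition; the hub
# weights name is an assumption, not taken from this file):
# pipe = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
# out = pipe("horse", num_inference_steps=25, guidance_scale=4.0)
# image_embeds = out.image_embeds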
| 15 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """
    Creates a mapping function from each choice's string representation to the
    actual value, to support multiple value types for a single argument.
    """
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """
    Argument helper enabling a concise syntax to create dataclass fields for
    parsing with `HfArgumentParser`.
    """
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses
    to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False):
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
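
# Hedged usage sketch (my example, not part of this module):
# from dataclasses import dataclass
#
# @dataclass
# class TrainingArgs:
#     learning_rate: float = HfArg(default=3e-4, help="Peak learning rate.", aliases=["--lr"])
#     do_eval: bool = True  # a --no_do_eval complement flag is generated automatically
#
# parser = HfArgumentParser(TrainingArgs)
# (args,) = parser.parse_args_into_dataclasses(args=["--lr", "1e-4"])
# assert args.learning_rate == 1e-4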
| 15 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
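
# Standalone sketch mirroring what the tests above exercise (my addition):
# scheduler = UnCLIPScheduler(variance_type="fixed_small_log", prediction_type="epsilon")
# scheduler.set_timesteps(25)
# sample = torch.randn(1, 3, 8, 8)
# residual = torch.randn_like(sample)
# out = scheduler.step(residual, scheduler.timesteps[0], sample, generator=torch.manual_seed(0))
# prev_sample = out.prev_sample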
| 369 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : str = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
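
# Note on the lazy pattern above (my annotation): importing this package pulls
# in none of the framework code; the first attribute access, e.g.
# `transformers.models.xlnet.XLNetModel`, makes _LazyModule import the real
# submodule on demand.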
| 99 | 0 |
"""simple docstring"""
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solves the system of two linear equations given as [a1, b1, c1] and
    [a2, b2, c2] (meaning a*x + b*y = c) using Cramer's rule.
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
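
# Worked example (my annotation): for x + 2y = 10 and 3x + 4y = 24,
# determinant = 1*4 - 3*2 = -2, determinant_x = 10*4 - 24*2 = -8 and
# determinant_y = 1*24 - 3*10 = -6, so
# cramers_rule_2x2([1, 2, 10], [3, 4, 24]) == (4.0, 3.0).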
| 172 |
"""simple docstring"""
def solution() -> int:
    """
    Returns the product a * b * c of the Pythagorean triplet for which
    a + b + c = 1000.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
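
# For reference (known result of Project Euler problem 9, my annotation):
# the triplet is (a, b, c) = (200, 375, 425), so solution() == 31875000.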
if __name__ == "__main__":
print(f'''{solution() = }''')
| 172 | 1 |
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
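
# Usage sketch (my annotation; occurrence indices computed by hand):
# auto = Automaton(["what", "hat", "ver", "er"])
# auto.search_in("whatever, err ... , wherever")
# -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}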
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    """
    Constructs a BridgeTower processor which wraps an image processor and a
    tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
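
# Hedged usage sketch (my addition; the checkpoint name is an assumption):
# processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
# enc = processor(images=image, text="a photo", return_tensors="pt")
# -> tokenizer fields (input_ids, attention_mask) plus pixel_values / pixel_mask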
| 256 | 0 |
"""simple docstring"""
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """
    Evaluate a fully parenthesized arithmetic expression using Dijkstra's
    two-stack algorithm.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
lowerCAmelCase__ = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
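    # A second fully parenthesized, single-digit example (added for illustration):
    equation = "((9 - (2 * 3)) + 8)"
    # answer = 11
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")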
| 72 |
'''simple docstring'''
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input('Enter image url: ').strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 34 | 0 |
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 364 |
'''simple docstring'''
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
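    # Quick illustration (assumes `maths.prime_check.is_prime` is importable):
    # 5 and 7 are twin primes, while 4 has no twin prime partner.
    print(twin_prime(5))  # 7
    print(twin_prime(4))  # -1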
| 170 | 0 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr, num_train_steps, num_warmup_steps, min_lr_ratio=0.0, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-8, adam_clipnorm=None, adam_global_clipnorm=None, weight_decay_rate=0.0, power=1.0, include_in_weight_decay=None):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
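# Usage sketch (illustrative, not part of the library): a 10k-step schedule with
# a 1k-step warmup and decoupled weight decay on everything but LayerNorm/bias.
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01
# )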
class AdamWeightDecay(Adam):
    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(self.weight_decay_rate, name="adam_weight_decay_rate")

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], use_locking=self._use_locking
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
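# Usage sketch (illustrative): accumulate gradients over several micro-batches,
# then apply the summed gradients once and reset. `compute_loss` is a
# hypothetical helper, not part of this module.
# accumulator = GradientAccumulator()
# for micro_batch in micro_batches:
#     with tf.GradientTape() as tape:
#         loss = compute_loss(model, micro_batch)
#     accumulator(tape.gradient(loss, model.trainable_variables))
# optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
# accumulator.reset()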
| 101 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
def A__ ( self):
return 3_2
@property
def A__ ( self):
return 3_2
@property
def A__ ( self):
return self.time_input_dim * 4
@property
def A__ ( self):
return 8
@property
def A__ ( self):
lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
return tokenizer
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,)
return CLIPTextModelWithProjection(A__)
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowercase = PriorTransformer(**A__)
return model
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowercase = ShapERenderer(**A__)
return model
def A__ ( self):
lowercase = self.dummy_prior
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_renderer
lowercase = HeunDiscreteScheduler(
beta_schedule='''exp''' ,num_train_timesteps=1_0_2_4 ,prediction_type='''sample''' ,use_karras_sigmas=A__ ,clip_sample=A__ ,clip_sample_range=1.0 ,)
lowercase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def A__ ( self ,A__ ,A__=0):
if str(A__).startswith('''mps'''):
lowercase = torch.manual_seed(A__)
else:
lowercase = torch.Generator(device=A__).manual_seed(A__)
lowercase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 3_2,
'''output_type''': '''np''',
}
return inputs
def A__ ( self):
lowercase = '''cpu'''
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**A__)
lowercase = pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
lowercase = pipe(**self.get_dummy_inputs(A__))
lowercase = output.images[0]
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
lowercase = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def A__ ( self):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def A__ ( self):
lowercase = torch_device == '''cpu'''
lowercase = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=A__ ,relax_max_difference=A__ ,)
def A__ ( self):
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**A__)
lowercase = pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
lowercase = 1
lowercase = 2
lowercase = self.get_dummy_inputs(A__)
for key in inputs.keys():
if key in self.batch_params:
lowercase = batch_size * [inputs[key]]
lowercase = pipe(**A__ ,num_images_per_prompt=A__)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
def A__ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''')
lowercase = ShapEPipeline.from_pretrained('''openai/shap-e''')
lowercase = pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
lowercase = torch.Generator(device=A__).manual_seed(0)
lowercase = pipe(
'''a shark''' ,generator=A__ ,guidance_scale=15.0 ,num_inference_steps=6_4 ,frame_size=6_4 ,output_type='''np''' ,).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(A__ ,A__)
| 101 | 1 |
"""simple docstring"""
import os
def lowerCAmelCase__ ( ) -> Optional[Any]:
"""simple docstring"""
with open(os.path.dirname(_UpperCamelCase ) + '/grid.txt' ) as f:
snake_case = [] # noqa: E741
for _ in range(2_0 ):
l.append([int(_UpperCamelCase ) for x in f.readline().split()] )
snake_case = 0
# right
for i in range(2_0 ):
for j in range(1_7 ):
snake_case = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case = temp
# down
for i in range(1_7 ):
for j in range(2_0 ):
snake_case = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case = temp
# diagonal 1
for i in range(1_7 ):
for j in range(1_7 ):
snake_case = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case = temp
# diagonal 2
for i in range(1_7 ):
for j in range(3 , 2_0 ):
snake_case = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case = temp
return maximum
if __name__ == "__main__":
print(solution())
| 363 |
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def snake_case ( self ):
"""simple docstring"""
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
if self.use_token_type_ids:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case = None
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case = ids_tensor([self.batch_size] , self.num_choices )
snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
"""simple docstring"""
snake_case = BioGptForCausalLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
# create attention mask
snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase )
snake_case = self.seq_length // 2
snake_case = 0
# first forward pass
snake_case ,snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case = ids_tensor((1,) , lowerCAmelCase ).item() + 1
snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCAmelCase )] , dim=1 , )
# get two different outputs
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
snake_case = model(lowerCAmelCase , past_key_values=lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=lowerCAmelCase ).to(lowerCAmelCase ).eval()
snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase )
# first forward pass
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , use_cache=lowerCAmelCase )
snake_case ,snake_case = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase )[
'last_hidden_state'
]
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase , lowerCAmelCase=False ):
"""simple docstring"""
snake_case = BioGptForCausalLM(lowerCAmelCase )
model.to(lowerCAmelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case ( self , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(lowerCAmelCase )
snake_case = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = self.num_labels
snake_case = BioGptForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.prepare_config_and_inputs()
(
(
snake_case
) ,(
snake_case
) ,(
snake_case
) ,(
snake_case
) ,(
snake_case
) ,(
snake_case
) ,(
snake_case
) ,
) = config_and_inputs
snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptModelTester(self )
snake_case = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case = type
self.model_tester.create_and_check_model(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowerCAmelCase , gradient_checkpointing=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCAmelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(lowerCAmelCase )
snake_case = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
snake_case = 'left'
# Define PAD Token = EOS Token = 50256
snake_case = tokenizer.eos_token
snake_case = model.config.eos_token_id
# use different length sentences to test batching
snake_case = [
'Hello, my dog is a little',
'Today, I',
]
snake_case = tokenizer(lowerCAmelCase , return_tensors='pt' , padding=lowerCAmelCase )
snake_case = inputs['input_ids'].to(lowerCAmelCase )
snake_case = model.generate(
input_ids=lowerCAmelCase , attention_mask=inputs['attention_mask'].to(lowerCAmelCase ) , )
snake_case = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(lowerCAmelCase )
snake_case = model.generate(input_ids=lowerCAmelCase )
snake_case = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
snake_case = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(lowerCAmelCase )
snake_case = model.generate(input_ids=lowerCAmelCase , max_length=model.config.max_length - num_paddings )
snake_case = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
snake_case = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase )
snake_case = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase )
snake_case = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , [non_padded_sentence, padded_sentence] )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = BioGptModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = 3
snake_case = input_dict['input_ids']
snake_case = input_ids.ne(1 ).to(lowerCAmelCase )
snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case = BioGptForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
"""simple docstring"""
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = 3
snake_case = 'multi_label_classification'
snake_case = input_dict['input_ids']
snake_case = input_ids.ne(1 ).to(lowerCAmelCase )
snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case = BioGptForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
snake_case = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
snake_case = model(lowerCAmelCase )[0]
snake_case = 4_23_84
snake_case = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase )
snake_case = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase , atol=1E-4 ) )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(lowerCAmelCase )
torch.manual_seed(0 )
snake_case = tokenizer('COVID-19 is' , return_tensors='pt' ).to(lowerCAmelCase )
snake_case = model.generate(
**lowerCAmelCase , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=lowerCAmelCase , )
snake_case = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase )
snake_case = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
| 149 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'add_prefix_space': True}
    test_seq2seq = False
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
def _UpperCAmelCase ( self , a ) -> Dict:
lowercase__ : Dict = 'lower newer'
lowercase__ : str = 'lower newer'
return input_text, output_text
def _UpperCAmelCase ( self ) -> List[str]:
        lowercase__ : List[str] = GPT2Tokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : List[Any] = 'lower newer'
lowercase__ : int = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowercase__ : Optional[Any] = tokenizer.tokenize(a , add_prefix_space=a )
self.assertListEqual(a , a )
lowercase__ : List[Any] = tokens + [tokenizer.unk_token]
lowercase__ : Optional[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
lowercase__ : Optional[int] = self.get_tokenizer()
lowercase__ : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=a )
lowercase__ : Optional[Any] = 'lower newer'
# Testing tokenization
lowercase__ : Any = tokenizer.tokenize(a , add_prefix_space=a )
lowercase__ : Optional[Any] = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
# Testing conversion to ids without special tokens
lowercase__ : int = tokenizer.encode(a , add_special_tokens=a , add_prefix_space=a )
lowercase__ : Tuple = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
# Testing conversion to ids with special tokens
lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=a )
lowercase__ : int = tokenizer.encode(a , add_prefix_space=a )
lowercase__ : List[Any] = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
# Testing the unknown token
lowercase__ : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
lowercase__ : List[str] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a ) , a )
def _UpperCAmelCase ( self , *a , **a ) -> List[str]:
pass
def _UpperCAmelCase ( self , a=1_5 ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(a , **a )
# Simple input
lowercase__ : Union[str, Any] = 'This is a simple input'
lowercase__ : Optional[Any] = ['This is a simple input 1', 'This is a simple input 2']
lowercase__ : int = ('This is a simple input', 'This is a pair')
lowercase__ : int = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='max_length' )
# Simple input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='max_length' )
# Simple input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='max_length' , )
# Pair input
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='max_length' )
# Pair input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='max_length' )
# Pair input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='max_length' , )
def _UpperCAmelCase ( self ) -> Optional[int]:
        lowercase__ : Tuple = GPT2Tokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
lowercase__ : Tuple = 'This is a simple input'
lowercase__ : Optional[Any] = ['This is a simple input looooooooong', 'This is a simple input']
lowercase__ : Any = ('This is a simple input', 'This is a pair')
lowercase__ : List[Any] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
lowercase__ : Optional[int] = tokenizer.pad_token_id
lowercase__ : Dict = tokenizer(a , padding='max_length' , max_length=3_0 , return_tensors='np' )
lowercase__ : str = tokenizer(a , padding=a , truncate=a , return_tensors='np' )
lowercase__ : str = tokenizer(*a , padding='max_length' , max_length=6_0 , return_tensors='np' )
lowercase__ : Any = tokenizer(a , padding=a , truncate=a , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Any = '$$$'
        lowercase__ : int = GPT2Tokenizer.from_pretrained(self.tmpdirname , bos_token=a , add_bos_token=a )
lowercase__ : List[Any] = 'This is a simple input'
lowercase__ : List[Any] = ['This is a simple input 1', 'This is a simple input 2']
lowercase__ : Union[str, Any] = tokenizer.bos_token_id
lowercase__ : List[str] = tokenizer(a )
lowercase__ : Union[str, Any] = tokenizer(a )
self.assertEqual(out_s.input_ids[0] , a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowercase__ : str = tokenizer.decode(out_s.input_ids )
lowercase__ : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : str = [self.get_tokenizer(do_lower_case=a , add_bos_token=a )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowercase__ : Optional[int] = 'Encode this.'
lowercase__ : Optional[int] = 'This one too please.'
lowercase__ : Optional[Any] = tokenizer.encode(a , add_special_tokens=a )
encoded_sequence += tokenizer.encode(a , add_special_tokens=a )
lowercase__ : Optional[int] = tokenizer.encode_plus(
a , a , add_special_tokens=a , return_special_tokens_mask=a , )
lowercase__ : Any = encoded_sequence_dict['input_ids']
lowercase__ : int = encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(a ) , len(a ) )
lowercase__ : Optional[Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(a )
]
lowercase__ : Tuple = [x for x in filtered_sequence if x is not None]
self.assertEqual(a , a )
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=a )
lowercase__ : int = 'A photo of a cat'
lowercase__ : List[Any] = tokenizer.encode(
a , )
self.assertEqual(a , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('test_opt' )
lowercase__ : List[str] = AutoTokenizer.from_pretrained('./test_opt' )
lowercase__ : Optional[Any] = tokenizer.encode(
a , )
self.assertEqual(a , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Any = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=a )
lowercase__ : str = 'A photo of a cat'
lowercase__ : Tuple = tokenizer.encode(
a , )
# Same as above
self.assertEqual(a , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : List[str] = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=a )
lowercase__ : Tuple = 'bos'
lowercase__ : Tuple = tokenizer.get_vocab()['bos']
lowercase__ : Dict = 'A photo of a cat'
lowercase__ : Optional[int] = tokenizer.encode(
a , )
# We changed the bos token
self.assertEqual(a , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('./tok' )
lowercase__ : List[str] = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
lowercase__ : List[Any] = tokenizer.encode(
a , )
self.assertEqual(a , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
| 77 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 99 | 0 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
def __call__( self : Tuple ):
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary()
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary()
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string()
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objs_to_list_of_image_dicts(objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
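# Illustrative round trip (example added for clarity, not part of the module):
# `Image().encode_example` turns a numpy array or PIL image into a
# {"bytes", "path"} dict, and `decode_example` restores a PIL.Image from it.
# feature = Image()
# encoded = feature.encode_example(np.zeros((4, 4, 3), dtype=np.uint8))
# restored = feature.decode_example(encoded)  # -> PIL.Image.Image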
| 21 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression given as a list of tokens."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
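    # Illustration: "2 1 + 3 *" evaluates to (2 + 1) * 3 = 9, and
    # "4 13 5 / +" to 4 + 13 // 5 = 6 (division truncates toward zero).
    print(evaluate_postfix(["2", "1", "+", "3", "*"]))  # 9
    print(evaluate_postfix(["4", "13", "5", "/", "+"]))  # 6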
| 21 | 1 |
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session, defaulting to CPUExecutionProvider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, "os.PathLike"], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str]] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
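
# Minimal usage sketch (hedged): run an exported ONNX model through the wrapper above.
# The local directory "./my_onnx_model" and the input name "sample" are illustrative
# assumptions; inspect `wrapper.model.get_inputs()` for the real input names of a graph.
if __name__ == "__main__":
    wrapper = OnnxRuntimeModel.from_pretrained("./my_onnx_model", provider="CPUExecutionProvider")
    outputs = wrapper(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))
    print([o.shape for o in outputs])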
| 55 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ) -> Tuple:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ) -> Union[str, Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> Optional[Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config( self ) -> List[Any]:
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output( self , result ) -> str:
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        model = DebertaModel(config=config )
        model.to(torch_device )
        model.eval()
        sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def create_and_check_deberta_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Tuple:
        model = DebertaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_deberta_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[Any]:
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(result )
    def create_and_check_deberta_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_deberta_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[Any]:
        model = DebertaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ = True
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
    def setUp( self ) -> Tuple:
        self.model_tester = DebertaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaConfig , hidden_size=37 )
    def test_config( self ) -> int:
        self.config_tester.run_common_tests()
    def test_deberta_model( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )
    def test_for_sequence_classification( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
    def test_for_masked_lm( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
    def test_for_token_classification( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> Optional[Any]:
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase):
@unittest.skip(reason='''Model not available yet''' )
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
pass
@slow
    def test_inference_no_head( self ) -> Union[str, Any]:
        model = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
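
# Note (hedged): tests decorated with @slow above are skipped by default; in a standard
# transformers checkout they can be enabled with e.g.
#   RUN_SLOW=yes pytest tests/models/deberta/test_modeling_deberta.py
# (the file path is assumed from the usual repository layout).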
| 256 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
    },
    'monolingual_vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'vinai/bartpho-syllable': 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    """
    Adapted from XLMRobertaTokenizer. Based on SentencePiece, with a reduced
    monolingual vocabulary on top of the multilingual sentencepiece model.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
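
# Minimal usage sketch (hedged): this module backs transformers' BartphoTokenizer, so the
# checkpoint referenced in the maps above can be loaded through the Auto classes:
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
    print(tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"])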
| 341 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = '▁'
__A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__A = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__A = {'vinai/bartpho-syllable': 1024}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =vocab_file
_lowerCAmelCase =monolingual_vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_lowerCAmelCase ={}
_lowerCAmelCase =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =cnt
cnt += 1
with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_lowerCAmelCase =line.strip().split()[0]
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
_lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Dict:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
_lowerCAmelCase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCAmelCase ) -> List[Any]:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
_lowerCAmelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return len(self.fairseq_ids_to_tokens )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
return self.fairseq_ids_to_tokens[index]
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__UpperCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 341 | 1 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Optional[int] = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( A__ ):
'''simple docstring'''
__lowerCamelCase : Tuple = "autoformer"
__lowerCamelCase : Any = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = "student_t", lowerCamelCase__ = "nll", lowerCamelCase__ = 1, lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7], lowerCamelCase__ = True, lowerCamelCase__ = 0, lowerCamelCase__ = 0, lowerCamelCase__ = 0, lowerCamelCase__ = 0, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = 64, lowerCamelCase__ = 2, lowerCamelCase__ = 2, lowerCamelCase__ = 2, lowerCamelCase__ = 2, lowerCamelCase__ = 32, lowerCamelCase__ = 32, lowerCamelCase__ = "gelu", lowerCamelCase__ = 0.1, lowerCamelCase__ = 0.1, lowerCamelCase__ = 0.1, lowerCamelCase__ = 0.1, lowerCamelCase__ = 0.1, lowerCamelCase__ = 100, lowerCamelCase__ = 0.02, lowerCamelCase__ = True, lowerCamelCase__=True, lowerCamelCase__ = 10, lowerCamelCase__ = 25, lowerCamelCase__ = 3, **lowerCamelCase__, ):
A : Union[str, Any] = prediction_length
A : Union[str, Any] = context_length if context_length is not None else prediction_length
A : Union[str, Any] = distribution_output
A : List[Any] = loss
A : Optional[int] = input_size
A : Dict = num_time_features
A : Optional[int] = lags_sequence
A : Any = scaling
A : Union[str, Any] = num_dynamic_real_features
A : Tuple = num_static_real_features
A : Any = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
A : Optional[int] = cardinality
else:
A : str = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
A : List[Any] = embedding_dimension
else:
A : Dict = [min(50, (cat + 1) // 2 ) for cat in self.cardinality]
A : Any = num_parallel_samples
# Transformer architecture configuration
A : Optional[int] = input_size * len(self.lags_sequence ) + self._number_of_features
A : Any = d_model
A : Optional[int] = encoder_attention_heads
A : Optional[int] = decoder_attention_heads
A : List[Any] = encoder_ffn_dim
A : Optional[int] = decoder_ffn_dim
A : Tuple = encoder_layers
A : Optional[int] = decoder_layers
A : Optional[Any] = dropout
A : str = attention_dropout
A : str = activation_dropout
A : str = encoder_layerdrop
A : Union[str, Any] = decoder_layerdrop
A : Optional[Any] = activation_function
A : List[Any] = init_std
A : Optional[int] = use_cache
# Autoformer
A : Any = label_length
A : Union[str, Any] = moving_average
A : Optional[Any] = autocorrelation_factor
super().__init__(is_encoder_decoder=__lowercase, **__lowercase )
@property
    def _number_of_features( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
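
# Minimal usage sketch: instantiating the config directly with illustrative values
# (the field names follow the released AutoformerConfig API).
if __name__ == "__main__":
    from transformers import AutoformerConfig

    config = AutoformerConfig(prediction_length=24, context_length=48, num_time_features=2)
    print(config.d_model, config.lags_sequence)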
| 116 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot-90M tokenizer based on BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __lowercase , __lowercase , __lowercase="__start__" , __lowercase="__end__" , __lowercase="__unk__" , __lowercase="__null__" , **__lowercase , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , **__lowercase )
with open(__lowercase , encoding="""utf-8""" ) as vocab_handle:
a__ : Optional[int] = json.load(__lowercase )
a__ : str = {v: k for k, v in self.encoder.items()}
with open(__lowercase , encoding="""utf-8""" ) as merges_handle:
a__ : Any = merges_handle.read().split("""\n""" )[1:-1]
a__ : Optional[Any] = [tuple(merge.split() ) for merge in merges]
a__ : Dict = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
a__ : Dict = {}
@property
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
return len(self.encoder )
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
a__ : Any = re.sub("""([.,!?()])""" , r""" \1""" , __lowercase )
a__ : int = re.sub("""(')""" , r""" \1 """ , __lowercase )
a__ : Tuple = re.sub(r"""\s{2,}""" , """ """ , __lowercase )
if "\n" in token:
a__ : Union[str, Any] = token.replace("""\n""" , """ __newln__""" )
a__ : Optional[int] = token.split(""" """ )
a__ : Union[str, Any] = []
for token in tokens:
if not len(__lowercase ):
continue
a__ : Union[str, Any] = token.lower()
a__ : List[Any] = tuple(__lowercase )
a__ : Optional[int] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
a__ : Any = get_pairs(__lowercase )
if not pairs:
words.append(__lowercase )
continue
while True:
a__ : Optional[int] = min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
a__ , a__ : str = bigram
a__ : str = []
a__ : Optional[Any] = 0
while i < len(__lowercase ):
try:
a__ : Tuple = word.index(__lowercase , __lowercase )
new_word.extend(word[i:j] )
a__ : Optional[Any] = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a__ : List[Any] = tuple(__lowercase )
a__ : Any = new_word
if len(__lowercase ) == 1:
break
else:
a__ : Optional[int] = get_pairs(__lowercase )
a__ : List[Any] = """@@ """.join(__lowercase )
a__ : Optional[Any] = word[:-4]
a__ : Any = word
words.append(__lowercase )
return " ".join(__lowercase )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> List[str]:
"""simple docstring"""
a__ : Dict = []
a__ : Optional[Any] = re.findall(r"""\S+\n?""" , __lowercase )
for token in words:
split_tokens.extend(list(self.bpe(__lowercase ).split(""" """ ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> int:
"""simple docstring"""
a__ : Tuple = token.lower()
return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> str:
"""simple docstring"""
return self.decoder.get(__lowercase , self.unk_token )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> str:
"""simple docstring"""
a__ : int = """ """.join(__lowercase ).replace("""@@ """ , """""" ).strip()
return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """Write the vocab and merges files for this tokenizer into `save_directory`."""
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
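
# Minimal usage sketch (hedged): this module backs transformers' BlenderbotSmallTokenizer;
# subword splits carry the "@@ " continuation marker produced by the bpe method above.
if __name__ == "__main__":
    from transformers import BlenderbotSmallTokenizer

    tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
    print(tokenizer.tokenize("sam i am."))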
| 170 | 0 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""

    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    return pt_model
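
# Minimal usage sketch (hedged): load a Flax `.msgpack` checkpoint into an existing PyTorch
# module. The model class and file path below are illustrative, not prescriptive:
#   from diffusers import UNet2DModel
#   pt_model = UNet2DModel()  # must match the architecture the checkpoint was trained with
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")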
| 353 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    # relax all edges leaving v in one search direction
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # a node already settled by the opposite search closes a candidate path
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_forward = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}

graph_backward = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
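
# Example run against the sample graphs above: the cheapest E -> F route is
# E -> G -> F with total weight 2 + 1 = 3 (traced by hand from the edge lists).
if __name__ == "__main__":
    print(bidirectional_dij("E", "F", graph_forward, graph_backward))  # expected: 3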
| 141 | 0 |
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return all primes below `limit` using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):  # only odd candidates need marking
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: longest sum of consecutive primes below `ceiling` that is itself prime."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
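
# Quick sanity check (worked by hand): the primes below 20 are exactly
#   prime_sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]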
| 123 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_blip'''] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blip'''] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blip'''] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
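
# Usage note: the _LazyModule registered above defers the heavy torch/TF imports until
# first attribute access, so e.g. `from transformers import BlipProcessor` only pays the
# import cost of the submodules it actually touches.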
| 149 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester ):
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(config , '''num_attention_heads''' ) )
        self.parent.assertTrue(hasattr(config , '''num_encoder_blocks''' ) )
class SegformerModelTester:
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[16, 32, 64, 128] , downsampling_rates=[1, 4, 8, 16] , num_attention_heads=[1, 2, 4, 8] , is_training=True , use_labels=True , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = SegformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
    def create_and_check_for_image_segmentation( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )
    def create_and_check_for_binary_image_segmentation( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        labels = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(torch_device )
        result = model(pixel_values , labels=labels )
        self.parent.assertGreater(result.loss , 0.0 )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__A : List[str] = True
__A : Union[str, Any] = False
__A : Union[str, Any] = False
__A : Dict = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = SegformerModelTester(self )
        self.config_tester = SegformerConfigTester(self , config_class=SegformerConfig )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_binary_image_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs )
    def test_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def __snake_case ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def __snake_case ( self : Dict ):
'''simple docstring'''
pass
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase , lowercase :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase :str = model_class(snake_case__ )
lowercase :Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase :Any = [*signature.parameters.keys()]
lowercase :Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case__ )
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase , lowercase :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase :Dict = True
for model_class in self.all_model_classes:
lowercase :List[Any] = True
lowercase :int = False
lowercase :int = True
lowercase :List[str] = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowercase :Dict = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowercase :Any = outputs.attentions
lowercase :Optional[Any] = sum(self.model_tester.depths )
self.assertEqual(len(snake_case__ ) , snake_case__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase :Optional[int] = True
lowercase :Optional[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowercase :Dict = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowercase :Tuple = outputs.attentions
self.assertEqual(len(snake_case__ ) , snake_case__ )
# verify the first attentions (first block, first layer)
lowercase :Tuple = (self.model_tester.image_size // 4) ** 2
lowercase :Tuple = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
lowercase :int = (self.model_tester.image_size // 3_2) ** 2
lowercase :List[Any] = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
lowercase :Any = len(snake_case__ )
# Check attention is always last and order is fine
lowercase :Tuple = True
lowercase :List[Any] = True
lowercase :Any = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowercase :str = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
self.assertEqual(out_len + 1 , len(snake_case__ ) )
lowercase :Optional[Any] = outputs.attentions
self.assertEqual(len(snake_case__ ) , snake_case__ )
# verify the first attentions (first block, first layer)
lowercase :Optional[Any] = (self.model_tester.image_size // 4) ** 2
lowercase :List[str] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def __snake_case ( self : Tuple ):
'''simple docstring'''
def check_hidden_states_output(snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : List[Any] ):
lowercase :List[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowercase :str = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowercase :Any = outputs.hidden_states
lowercase :List[str] = self.model_tester.num_encoder_blocks
self.assertEqual(len(snake_case__ ) , snake_case__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowercase , lowercase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase :str = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase :str = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def __snake_case ( self : Tuple ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase , lowercase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase :str = True
for model_class in self.all_model_classes:
if model_class in get_values(snake_case__ ):
continue
lowercase :Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
lowercase :Union[str, Any] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowercase :Any = model(**snake_case__ ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __snake_case ( self : Dict ):
'''simple docstring'''
pass
@slow
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase :Union[str, Any] = SegformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
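# Illustrative helper (a sketch, not part of the original test file): the
# semantic-segmentation post-processing exercised above boils down to bilinear
# upsampling of the logits followed by an argmax over the label dimension.
def _naive_post_process(logits, target_size):
    # logits: (batch, num_labels, height, width) -> (batch, *target_size) label map
    upsampled = torch.nn.functional.interpolate(logits, size=target_size, mode="bilinear", align_corners=False)
    return upsampled.argmax(dim=1)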
| 172 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # the two terms merge into one implicant with a "_" wildcard
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # essential prime implicants: columns covered by exactly one row
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily cover the remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
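# Illustrative, non-interactive usage (a sketch, not part of the original
# script): minimize f(a, b, c) over minterms {1, 5, 6, 7}.
def _demo_qm() -> None:
    binary = decimal_to_binary(3, [1, 5, 6, 7])
    prime_implicants = check(binary)
    chart = prime_implicant_chart(prime_implicants, binary)
    print(selection(chart, prime_implicants))  # e.g. ['_01', '11_']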
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 172 | 1 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
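# Example invocation (illustrative; all paths are placeholders):
#
#   python convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf2_checkpoint \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/output_dir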
| 21 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
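# Illustrative follow-up (a sketch, not part of the original test): CLIP-style
# logits become per-image caption probabilities via a softmax over the text axis.
def _probs_from_logits(logits_per_image):
    exp = np.exp(logits_per_image - logits_per_image.max(axis=1, keepdims=True))
    return exp / exp.sum(axis=1, keepdims=True)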
| 21 | 1 |
"""simple docstring"""
encode_dict = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encode a word with the Baconian cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-ciphered string back to plain text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
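# Illustrative round-trip (a sketch, not part of the original module): every
# letter maps to a five-character A/B group, so decode() consumes chunks of five.
def _demo_bacon() -> None:
    coded = encode("hi")  # "AABBB" + "ABAAA"
    assert coded == "AABBBABAAA"
    assert decode(coded) == "hi"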
if __name__ == "__main__":
from doctest import testmod
testmod()
| 367 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
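# Note (illustrative, not part of the original file): with the lazy wiring
# above, importing the package is cheap; the heavy modeling module listed in
# `_import_structure` is only imported when one of its names is first accessed:
#
#   import transformers
#   processor_cls = transformers.InstructBlipProcessor  # triggers the real import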
| 226 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
| 341 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 1 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
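# Illustrative check (a sketch, not part of the original file): under
# pytest-xdist, worker "gw3" maps to id 3, giving port 29500 + 3 = 29503, so
# concurrent workers never share a torch.distributed master port.
def _port_demo():
    os.environ["PYTEST_XDIST_WORKER"] = "gw3"
    assert pytest_xdist_worker_id() == 3
    assert get_torch_dist_unique_port() == 29503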
| 350 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
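# Illustrative usage (a sketch, not part of the original module):
#
#   dep_version_check("numpy")             # check numpy against the pinned range in `deps`
#   require_version("tokenizers>=0.11.1")  # ad-hoc check with an explicit specifier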
| 198 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
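# Minimal usage sketch (not part of the original test file): the model id and
# image path below simply mirror the slow test above.
def _vqa_demo():
    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    answers = vqa(
        image="./tests/fixtures/tests_samples/COCO/000000039769.png",
        question="How many cats are there?",
        top_k=2,
    )
    print(answers)  # e.g. [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]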
| 25 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize('compression_format', ['gzip', 'xz', 'zstd'] )
def test_cached_path_extract ( compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file ):
    '''simple docstring'''
    input_paths = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / 'cache'
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True )
    extracted_path = cached_path(input_path, download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted', [True, False] )
@pytest.mark.parametrize('default_cache_dir', [True, False] )
def test_extracted_datasets_path ( default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch ):
    '''simple docstring'''
    custom_cache_dir = 'custom_cache'
    custom_extracted_dir = 'custom_extracted_dir'
    custom_extracted_path = tmp_path / 'custom_extracted_path'
    if default_extracted:
        expected = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
    else:
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR', custom_extracted_dir )
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename, download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local ( text_file ):
    '''simple docstring'''
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local ( tmp_path ):
    '''simple docstring'''
    # absolute path
    missing_file = str(tmp_path.resolve() / '__missing_file__.txt' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = './__missing_file__.txt'
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec ( tmpfs_file ):
    '''simple docstring'''
    output_path = get_from_cache(f'tmp://{tmpfs_file}' )
    with open(output_path ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE', True )
def test_cached_path_offline ( ):
    '''simple docstring'''
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE', True )
def test_http_offline ( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('https://huggingface.co', temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE', True )
def test_ftp_offline ( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('ftp://huggingface.co', temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE', True )
def test_fsspec_offline ( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('s3://huggingface.co', temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('s3://huggingface.co' )
| 141 | 0 |
from collections import namedtuple
from_to = namedtuple("""from_to""", """from_ to""")
METRIC_CONVERSION = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.0_01, 1000),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.0_04_54, 2_64.1_72),
"""cubicyard""": from_to(0.7_64_55, 1.3_07_95),
"""cubicfoot""": from_to(0.0_28, 35.31_47),
"""cup""": from_to(0.0_00_23_65_88, 42_26.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}. Supported values are:\n"
            + ', '.join(METRIC_CONVERSION) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ', '.join(METRIC_CONVERSION) )
    # every conversion factors through the cubic-metre base unit
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
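# --- Illustrative usage sketch (added for clarity; not part of the original
# module). A conversion multiplies by the source unit's `from_` factor to reach
# cubic metres, then by the target unit's `to` factor. ---
if __name__ == "__main__":
    # 2 litres -> 0.002 cubic metres -> 2 * 0.001 * 264.172 ~= 0.5283 gallons
    print(volume_conversion(2, "litre", "gallon"))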
| 364 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}" )
    print(f"The known value of pi is {pi}" )
    print(f"The total error is {abs(pi - pi_estimate )}" )
def area_under_curve_estimator(iterations: int , function_to_integrate: Callable[[float], float] , min_value: float = 0.0 , max_value: float = 1.0 , ) -> float:
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int , min_value: float = 0.0 , max_value: float = 1.0 ) -> None:
    def identity_function(x: float) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************' )
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}" )
    print(f"Estimated value is {estimated_value}" )
    print(f"Expected value is {expected_value}" )
    print(f"Total error is {abs(estimated_value - expected_value )}" )
print('******************' )
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        # quarter circle of radius 2: the area under it from 0 to 2 is pi
        return sqrt(4.0 - x * x )
    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
    print('******************' )
    print('Estimating pi using area_under_curve_estimator' )
    print(f"Estimated value is {estimated_value}" )
    print(f"Expected value is {pi}" )
    print(f"Total error is {abs(estimated_value - pi )}" )
print('******************' )
if __name__ == "__main__":
import doctest
doctest.testmod()
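# --- Illustrative usage sketch (added; not part of the original module). All
# three routines are Monte Carlo estimates, so the error shrinks roughly like
# 1/sqrt(iterations); 100_000 samples is an arbitrary demo choice. ---
if __name__ == "__main__":
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000, 0.0, 2.0)
    pi_estimator_using_area_under_curve(100_000)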
| 113 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 172 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig ( datasets.BuilderConfig ):
UpperCAmelCase : Optional[datasets.Features] = None
UpperCAmelCase : str = "utf-8"
UpperCAmelCase : Optional[str] = None
UpperCAmelCase : Optional[str] = None
UpperCAmelCase : bool = True # deprecated
UpperCAmelCase : Optional[int] = None # deprecated
UpperCAmelCase : int = 10 << 20 # 10MB
UpperCAmelCase : Optional[bool] = None
class Json ( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self) -> datasets.DatasetInfo:
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead')
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.')
        if self.config.newlines_in_values is not None:
            raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported')
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self , dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files , (str, list, tuple)):
            files = data_files
            if isinstance(files , str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'files': files}))
        return splits
    def _cast_table(self , pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table) , type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self , files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset , (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file , 'rb') as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32 , 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding , errors=encoding_errors).encode('utf-8')
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch) , read_options=paj.ReadOptions(block_size=block_size))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e , pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset , list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. ") from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
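# --- Illustrative usage sketch (added; not part of the builder). This builder
# is normally reached through `datasets.load_dataset`; the file names and the
# `field` value below are hypothetical placeholders. ---
# from datasets import load_dataset
# ds = load_dataset("json", data_files="records.jsonl")               # one JSON object per line
# ds = load_dataset("json", data_files="nested.json", field="data")   # records stored under a key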
| 172 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline ( DiffusionPipeline ):
    '''simple docstring'''
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler
    def __init__( self ,unet: UNetaDModel ,scheduler: ScoreSdeVeScheduler ):
        super().__init__()
        self.register_modules(unet=unet ,scheduler=scheduler )
    @torch.no_grad()
    def __call__( self ,batch_size: int = 1 ,num_inference_steps: int = 20_00 ,generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,output_type: Optional[str] = "pil" ,return_dict: bool = True ,**kwargs ,):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape ,generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] ,device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample ,sigma_t ).sample
                sample = self.scheduler.step_correct(model_output ,sample ,generator=generator ).prev_sample
            # prediction step
            model_output = model(sample ,sigma_t ).sample
            output = self.scheduler.step_pred(model_output ,t ,sample ,generator=generator )
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 ,1 )
        sample = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
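# --- Illustrative usage sketch (added; not part of the pipeline module). The
# checkpoint name below is a hypothetical placeholder for any SDE-VE
# checkpoint that bundles a unet and a ScoreSdeVeScheduler. ---
# pipe = ScoreSdeVePipeline.from_pretrained("some-org/some-sde-ve-checkpoint")
# image = pipe(batch_size=1, num_inference_steps=2000).images[0]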
| 124 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 124 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        '''latents''',
        '''num_images_per_prompt''',
        '''callback''',
        '''callback_steps''',
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__lowercase : Union[str, Any] = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = TransformeraDModel(
            sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=True , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=False , )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
        return components
    def get_dummy_inputs(self , device , seed=0):
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """class_labels""": [1],
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
    def test_inference(self):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff , 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True , expected_max_diff=1e-3)
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""")
        pipe.to("""cuda""")
        words = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
        ids = pipe.get_label_ids(words)
        images = pipe(ids , generator=generator , num_inference_steps=40 , output_type="""np""").images
        for word, image in zip(words , images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy")
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("""cuda""")
        words = ["""vase""", """umbrella"""]
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids , generator=generator , num_inference_steps=25 , output_type="""np""").images
        for word, image in zip(words , images):
            expected_image = load_numpy(
                """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
                f"/dit/{word}_512.npy")
            assert np.abs((expected_image - image).max()) < 1e-1
| 100 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester ( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , apply_ocr=True , ):
        '''simple docstring'''
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self ):
        '''simple docstring'''
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''apply_ocr''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def snake_case__ ( self : int ):
'''simple docstring'''
pass
    def test_call_pil( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCAmelCase : Any = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
__UpperCAmelCase : Optional[int] = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
__UpperCAmelCase : Any = image_processing(a_ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCAmelCase : Any = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
__UpperCAmelCase : Tuple = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], 
[6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , a_ )
self.assertListEqual(encoding.boxes , a_ )
# with apply_OCR = False
__UpperCAmelCase : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ )
__UpperCAmelCase : List[Any] = image_processing(a_ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 226 | 0 |
def twos_complement ( number: int ) -> str:
    if number > 0:
        raise ValueError('input must be a negative integer' )
    binary_number_length = len(bin(number )[3:] )
    # 2**n - |number| gives the low n bits; a leading '1' acts as the sign bit
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            '1'
            + '0' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else '0'
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
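# --- Illustrative usage sketch (added; not part of the original module).
# For -5: |−5| has 3 bits, 2**3 − 5 = 3 -> '11', padded and sign-prefixed to
# '1011', the 4-bit two's complement of -5. ---
if __name__ == "__main__":
    print(twos_complement(-5))  # 0b1011
    print(twos_complement(0))   # 0b0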
| 45 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests ( TestCasePlus ):
    def _create_dummy_data( self , data_dir ) -> None:
        os.makedirs(data_dir , exist_ok=True )
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 12, 'val': 2, 'test': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '\n'.join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , F"""{split}.{field}""" ) , 'w' ) as f:
                    f.write(content )
    def _run_finetune( self , gpus , distributed_retriever = "pytorch" ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , 'output' )
        data_dir = os.path.join(tmp_dir , 'data' )
        self._create_dummy_data(data_dir=data_dir )
        testargs = F"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
        if gpus > 0:
            testargs.append(F"""--gpus={gpus}""" )
            if is_apex_available():
                testargs.append('--fp16' )
        else:
            testargs.append('--gpus=0' )
            testargs.append('--distributed_backend=ddp_cpu' )
            testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )
        metrics_save_path = os.path.join(output_dir , 'metrics.json' )
        with open(metrics_save_path ) as f:
            result = json.load(f )
        return result
    @require_torch_gpu
    def test_finetune_gpu( self ):
        result = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
    @require_torch_multi_gpu
    def test_finetune_multigpu( self ):
        result = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval( self ):
        result = self._run_finetune(gpus=1 , distributed_retriever='ray' )
        self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval( self ):
        result = self._run_finetune(gpus=1 , distributed_retriever='ray' )
        self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 45 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_deit'] = ['DeiTFeatureExtractor']
    _import_structure['image_processing_deit'] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deit'] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deit'] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
class TaTokenizerFast ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F"""<extra_id_{i}>""" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda token : bool('''extra_id_''' in str(token ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
                    ''' tokens''' )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , FutureWarning , )
return max_model_length
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(F"""Copy vocab file to {out_vocab_file}""" )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
    def get_sentinel_tokens( self ):
        return list(
            set(filter(lambda token : re.search(r'''<extra_id_\d+>''' , token ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ):
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
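# --- Illustrative usage sketch (added; not part of the tokenizer module),
# using the standard t5-small checkpoint referenced in the maps above. ---
# tokenizer = TaTokenizerFast.from_pretrained("t5-small")
# ids = tokenizer("translate English to German: Hello", return_tensors="pt").input_ids
# tokenizer.get_sentinel_tokens()  # e.g. ['<extra_id_0>', ...] (set order may vary)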
| 198 | 0 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int] , max_sum: int ) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result
def create_state_space_tree(nums: list[int] , max_sum: int , num_index: int , path: list[int] , result: list[list[int]] , remaining_nums_sum: int , ) -> None:
    # prune: the partial sum already overshot max_sum, or even taking every
    # remaining number cannot reach it
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 113 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def _ask_field( input_text , convert_value=None , default=None , error_message=None ):
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )
def _ask_options( input_text , options=[] , convert_value=None , default_choice=0 ):
    menu = BulletMenu(input_text , options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result
def _convert_compute_environment( value ):
    value = int(value )
    return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value] )
def _convert_distributed_mode( value ):
    value = int(value )
    return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value] )
def _convert_dynamo_backend( value ):
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _convert_mixed_precision( value ):
    value = int(value )
    return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value] )
def _convert_sagemaker_distributed_mode( value ):
    value = int(value )
    return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value] )
def _convert_yes_no_to_bool( value ):
    return {"yes": True, "no": False}[value.lower()]
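# --- Illustrative usage sketch (added; not part of the original module). It
# wires a converter into an interactive prompt; input is read from stdin. ---
# mixed_precision = _ask_options(
#     'Do you wish to use mixed precision?',
#     ['no', 'fp16', 'bf16', 'fp8'],
#     _convert_mixed_precision,
# )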
class SubcommandHelpFormatter ( argparse.RawDescriptionHelpFormatter ):
    def _format_usage( self , usage , actions , groups , prefix ):
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace('<command> [<args>] ' , '' )
        return usage
| 113 | 1 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SAMPLE_VOCAB = get_tests_dir('fixtures/vocab.json')
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir('fixtures')
class AutoProcessorTest(unittest.TestCase):
    '''simple docstring'''
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    def setUp(self) -> None:
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_processor_from_model_shortcut(self) -> None:
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor , WavaVecaProcessor)
    def test_processor_from_local_directory_from_repo(self) -> None:
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor , WavaVecaProcessor)
    def test_processor_from_local_directory_from_extractor_config(self) -> None:
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG , os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB , os.path.join(tmpdirname , "vocab.json"))
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor , WavaVecaProcessor)
    def test_processor_from_local_directory_from_model_config(self) -> None:
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = WavaVecaFeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
            processor = WavaVecaProcessor(feature_extractor , tokenizer)
            # save in new folder
            processor.save_pretrained(tmpdirname)
            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname , TOKENIZER_CONFIG_FILE) , "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")
            with open(os.path.join(tmpdirname , TOKENIZER_CONFIG_FILE) , "w") as f:
                f.write(json.dumps(config_dict))
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor , WavaVecaProcessor)
    def _lowerCamelCase ( self) -> Dict:
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
            processor = Wav2Vec2Processor(feature_extractor , tokenizer)
            # save in new folder
            processor.save_pretrained(tmpdirname)
            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME) , "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")
            with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME) , "w") as f:
                f.write(json.dumps(config_dict))
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor , Wav2Vec2Processor)
    def _lowerCamelCase ( self) -> List[str]:
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB , os.path.join(tmpdirname , "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME) , "w") as f:
                f.write("{}")
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor , Wav2Vec2Processor)
    def _lowerCamelCase ( self) -> Union[str, Any]:
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=False)
        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__ , "NewProcessor")
        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor")
        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast")
            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=True , use_fast=False)
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
    def _lowerCamelCase ( self) -> Dict:
        try:
            AutoConfig.register("custom" , CustomConfig)
            AutoFeatureExtractor.register(CustomConfig , CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig , CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config , Wav2Vec2Processor)
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir , "vocab.txt")
                with open(vocab_file , "w" , encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)
            processor = CustomProcessor(feature_extractor , tokenizer)
            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor , CustomProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def _lowerCamelCase ( self) -> Any:
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False
        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False
        try:
            AutoConfig.register("custom" , CustomConfig)
            AutoFeatureExtractor.register(CustomConfig , NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig , NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__ , "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)
            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=False)
            self.assertEqual(processor.__class__.__name__ , "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)
            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=True)
            self.assertEqual(processor.__class__.__name__ , "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def _lowerCamelCase ( self) -> Dict:
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast")
    def _lowerCamelCase ( self) -> List[Any]:
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor")
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    @classmethod
    def _lowerCamelCase ( cls) -> Any:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
def _lowerCamelCase ( cls) -> List[str]:
try:
delete_repo(token=cls._token , repo_id="test-processor")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor")
except HTTPError:
pass
    def _lowerCamelCase ( self) -> Tuple:
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir , "test-processor") , push_to_hub=True , use_auth_token=self._token)
            new_processor = Wav2Vec2Processor.from_pretrained(F"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v , getattr(new_processor.feature_extractor , k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab())
    def _lowerCamelCase ( self) -> str:
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir , "test-processor-org") , push_to_hub=True , use_auth_token=self._token , organization="valid_org" , )
            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v , getattr(new_processor.feature_extractor , k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab())
    def _lowerCamelCase ( self) -> int:
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , "vocab.txt")
            with open(vocab_file , "w" , encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)
        processor = CustomProcessor(feature_extractor , tokenizer)
        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(F"{USER}/test-dynamic-processor" , token=self._token)
            repo = Repository(tmp_dir , clone_from=F"{USER}/test-dynamic-processor" , token=self._token)
            processor.save_pretrained(tmp_dir)
            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map , {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                } , )
            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir , "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"] , {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                } , )
            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir , "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir , "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir , "custom_processing.py")))
            repo.push_to_hub()
            new_processor = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=True)
            # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
            self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor")
| 11 |
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 65536,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 65536,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 131072,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
}
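# t = atan2(sigma, alpha) / (pi/2) maps an (alpha, sigma) noise pair onto a diffusion
# time in [0, 1]; get_crash_schedule then turns a linear time grid into the "crash"
# schedule used by the original audio-diffusion sampler.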
def alpha_sigma_to_t(alpha , sigma ):
    return torch.atan2(sigma , alpha ) / math.pi * 2
def get_crash_schedule(t ):
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
class Object(object ):
    '''simple docstring'''
    pass
class DiffusionUncond(nn.Module ):
    '''simple docstring'''
    def __init__( self , global_args ) -> None:
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args , n_attn_layers=4 )
        self.diffusion_ema = deepcopy(self.diffusion )
        self.rng = torch.quasirandom.SobolEngine(1 , scramble=True )
def download(model_name ):
    url = MODELS_MAP[model_name]['url']
    os.system(F'wget {url} ./' )
    return F'./{model_name}.ckpt'
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
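# The *_NUM_TO_LAYER tables translate the sequential child indices of the original
# UNet into diffusers' named sub-modules; RES_CONV_MAP and ATTN_MAP do the same for
# the layers inside each residual / attention block.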
def convert_resconv_naming(name ):
    if name.startswith('skip' ):
        return name.replace('skip' , RES_CONV_MAP['skip'] )
    # name has to be of format main.{digit}
    if not name.startswith('main.' ):
        raise ValueError(F'ResConvBlock error with {name}' )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name ):
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(F'Attn error with {name}' )
def rename(input_string , max_depth=13 ):
    string = input_string
    if string.split('.' )[0] == "timestep_embed":
        return string.replace('timestep_embed' , 'time_proj' )
    depth = 0
    if string.startswith('net.3.' ):
        depth += 1
        string = string[6:]
    elif string.startswith('net.' ):
        string = string[4:]
    while string.startswith('main.7.' ):
        depth += 1
        string = string[7:]
    if string.startswith('main.' ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = 'mid_block'
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = F'down_blocks.{depth}'
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = F'up_blocks.{max_depth - depth - 1}'
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = F'up_blocks.{max_depth - 1}' if int(layer_num ) > 3 else 'down_blocks.0'
    if not string_left.startswith('.' ):
        raise ValueError(F'Naming error with {input_string} and string_left: {string_left}.' )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left )
        string_left = new_string_left
    if not isinstance(string_left , list ):
        new_string = prefix + '.' + new_layer + '.' + string_left
    else:
        new_string = [prefix + '.' + new_layer + '.' + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict ):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('kernel' ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict , new_k , v ):
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args ):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    model_name = args.model_path.split('/' )[-1].split('.' )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), F'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
        args.model_path = download(model_name )
    sample_rate = MODELS_MAP[model_name]['sample_rate']
    sample_size = MODELS_MAP[model_name]['sample_size']
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size , sample_rate=sample_rate )
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=device )['state_dict'] )
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict )
    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(renamed_minus_diffusers ) == 0, F'Problem with {renamed_minus_diffusers}'
    assert all(k.endswith('kernel' ) for k in list(diffusers_minus_renamed ) ), F'Problem with {diffusers_minus_renamed}'
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), F'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict )
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps )
    generator = torch.manual_seed(seed )
    noise = torch.randn([1, 2, config.sample_size] , generator=generator ).to(device )
    t = torch.linspace(1 , 0 , steps + 1 , device=device )[:-1]
    step_list = get_crash_schedule(t )
    pipe = DanceDiffusionPipeline(unet=diffusers_model , scheduler=scheduler )
    generator = torch.manual_seed(33 )
    audio = pipe(num_inference_steps=steps , generator=generator ).audios
    generated = sampling.iplms_sample(orig_model , noise , step_list , {} )
    generated = generated.clamp(-1 , 1 )
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print('Diff sum' , diff_sum )
    print('Diff max' , diff_max )
    assert diff_max < 1E-3, F'Diff max: {diff_max} is too much :-/'
    print(F'Conversion for {model_name} successful!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
main(args)
| 113 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
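# "▁" is SentencePiece's word-boundary marker: a piece that starts with it begins a new word.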
class _lowercase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        """simple docstring"""
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.sp_model )
    def get_vocab( self ) -> Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
def __getstate__( self : Tuple ) -> List[Any]:
"""simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
return state
    def __setstate__( self , d ) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ) -> str:
        """simple docstring"""
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD' , outputs )
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text: str ) -> List[str]:
        """simple docstring"""
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ) -> int:
        """simple docstring"""
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ) -> str:
        """simple docstring"""
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 362 |
def __lowercase ( lowerCamelCase : int ):
    if not isinstance(lowerCamelCase , int ):
        raise TypeError('only integers accepted as input' )
    num_str = str(abs(lowerCamelCase ) )
    num_transpositions = [list(num_str ) for _ in range(len(num_str ) )]
    # drop one digit in each copy, then keep the largest remaining number
    for index in range(len(num_str ) ):
        num_transpositions[index].pop(index )
    return max(
        int(''.join(transposition ) ) for transposition in num_transpositions )
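# Example: 123 -> candidates 23, 13, 12 -> 23 is returned.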
if __name__ == "__main__":
__import__('doctest').testmod()
| 50 | 0 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Union[str, Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self ) -> int:
import diffusers
from diffusers.dependency_versions_table import deps
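        # Dummy placeholder classes record the backends they require; every backend
        # must appear in the dependency table (two of them use different pip names).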
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"""{backend} is not in the deps table!"""
| 124 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list ,b: list ) -> list:
    if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
        raise Exception("""Matrices are not 2x2""" )
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list ,matrix_b: list ) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def matrix_subtraction(matrix_a: list ,matrix_b: list ) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def split_matrix(a: list ) -> tuple[list, list, list, list]:
    if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception("""Odd matrices are not supported!""" )
    matrix_length = len(a )
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid ,matrix_length )] for i in range(mid )]
    bot_right = [
        [a[i][j] for j in range(mid ,matrix_length )] for i in range(mid ,matrix_length )
    ]
    top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
    bot_left = [[a[i][j] for j in range(mid )] for i in range(mid ,matrix_length )]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list ) -> tuple[int, int]:
    return len(matrix ), len(matrix[0] )
def print_matrix(matrix: list ) -> None:
    print("""\n""".join(str(line ) for line in matrix ) )
def actual_strassen(matrix_a: list ,matrix_b: list ) -> list:
    if matrix_dimensions(matrix_a ) == (2, 2):
        return default_matrix_multiplication(matrix_a ,matrix_b )
    a, b, c, d = split_matrix(matrix_a )
    e, f, g, h = split_matrix(matrix_b )
    t1 = actual_strassen(a ,matrix_subtraction(f ,h ) )
    t2 = actual_strassen(matrix_addition(a ,b ) ,h )
    t3 = actual_strassen(matrix_addition(c ,d ) ,e )
    t4 = actual_strassen(d ,matrix_subtraction(g ,e ) )
    t5 = actual_strassen(matrix_addition(a ,d ) ,matrix_addition(e ,h ) )
    t6 = actual_strassen(matrix_subtraction(b ,d ) ,matrix_addition(g ,h ) )
    t7 = actual_strassen(matrix_subtraction(a ,c ) ,matrix_addition(e ,f ) )
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5 ,t4 ) ,t2 ) ,t6 )
    top_right = matrix_addition(t1 ,t2 )
    bot_left = matrix_addition(t3 ,t4 )
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1 ,t5 ) ,t3 ) ,t7 )
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(bot_right ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
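# Strassen replaces the 8 recursive products of naive block multiplication with 7
# (t1..t7 above) at the cost of extra additions, giving O(n^log2(7)) ~ O(n^2.81).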
def strassen(matrixa: list ,matrixb: list ) -> list:
    if matrix_dimensions(matrixa )[1] != matrix_dimensions(matrixb )[0]:
        msg = (
            """Unable to multiply these matrices, please check the dimensions.\n"""
            f"""Matrix A: {matrixa}\n"""
            f"""Matrix B: {matrixb}"""
        )
        raise Exception(msg )
    dimensiona = matrix_dimensions(matrixa )
    dimensionb = matrix_dimensions(matrixb )
    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]
    maximum = max(*dimensiona ,*dimensionb )
    maxim = int(math.pow(2 ,math.ceil(math.log2(maximum ) ) ) )
    new_matrixa = matrixa
    new_matrixb = matrixb
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 ,maxim ):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1] ,maxim ):
                new_matrixa[i].append(0 )
        else:
            new_matrixa.append([0] * maxim )
        if i < dimensionb[0]:
            for _ in range(dimensionb[1] ,maxim ):
                new_matrixb[i].append(0 )
        else:
            new_matrixb.append([0] * maxim )
    final_matrix = actual_strassen(new_matrixa ,new_matrixb )
    # Removing the additional zeros
    for i in range(0 ,maxim ):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1] ,maxim ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrixa = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    matrixb = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
    print(strassen(matrixa, matrixb))
| 124 | 1 |
import os
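# Project Euler 99: each line of base_exp.txt holds "base,exponent"; the largest
# power is found by comparing exponent * log10(base) instead of evaluating the
# astronomically large powers themselves.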
from math import log10
def solution( data_file: str = "base_exp.txt" ) -> int:
    largest: float = 0
    result: int = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a, x = list(map(int , line.split("," ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
| 134 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "beit"
    def __init__( self , vocab_size=8192 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11" )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1E-4
| 134 | 1 |
"""simple docstring"""
from collections import defaultdict
from math import gcd
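# Project Euler 75: count wire lengths L <= limit that can be bent into exactly one
# integer-sided right triangle. Primitive triples come from Euclid's formula with
# coprime m > n of opposite parity; the primitive perimeter is 2m(m + n).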
def solution( limit: int = 1500000 ) -> int:
    frequencies: defaultdict = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 45 |
"""simple docstring"""
def is_pangram( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(''' ''' , '''''' )
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 26
def is_pangram_faster( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - ord('a' )] = True
        elif char.isupper():
            flag[ord(char ) - ord('A' )] = True
    return all(flag )
def is_pangram_fastest( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()} ) == 26
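# Three implementations of the same check, ordered from clearest to fastest; the
# benchmark below bears out the set-comprehension version winning.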
def benchmark( ) -> None:
    from timeit import timeit
    setup = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
    print(timeit('''is_pangram()''' , setup=setup ) )
    print(timeit('''is_pangram_faster()''' , setup=setup ) )
    print(timeit('''is_pangram_fastest()''' , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 45 | 1 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
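# These tests shard batch samplers / iterable datasets across 2 simulated processes
# and compare the per-process shards against hand-written expectations.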
class RandomIterableDataset(IterableDataset ):
    def __init__( self , p_stop=0.0_1 , max_length=1000 ):
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__( self ):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class UpperCamelCase ( unittest.TestCase ):
    def check_batch_sampler_shards( self , batch_sampler , expected , split_batches=False , even_batches=True ):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
            for i in range(2 )
        ]
        batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
        self.assertListEqual(batch_sampler_lists , expected )
    def _lowercase ( self : Dict ) -> Optional[int]:
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24 ) , batch_size=3 , drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected )
        batch_sampler = BatchSampler(range(24 ) , batch_size=3 , drop_last=True )
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler , expected )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21 ) , batch_size=3 , drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected )
        batch_sampler = BatchSampler(range(21 ) , batch_size=3 , drop_last=True )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22 ) , batch_size=3 , drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected )
        batch_sampler = BatchSampler(range(22 ) , batch_size=3 , drop_last=True )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected )
        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20 ) , batch_size=3 , drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected )
        batch_sampler = BatchSampler(range(20 ) , batch_size=3 , drop_last=True )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected )
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2 ) , batch_size=3 , drop_last=False )
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler , expected )
        batch_sampler = BatchSampler(range(2 ) , batch_size=3 , drop_last=True )
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler , expected )
    def _lowercase ( self : Optional[Any] ) -> Optional[int]:
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24 ) , batch_size=4 , drop_last=False )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True )
        batch_sampler = BatchSampler(range(24 ) , batch_size=4 , drop_last=True )
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True )
        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22 ) , batch_size=4 , drop_last=False )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True )
        batch_sampler = BatchSampler(range(22 ) , batch_size=4 , drop_last=True )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21 ) , batch_size=4 , drop_last=False )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True )
        batch_sampler = BatchSampler(range(21 ) , batch_size=4 , drop_last=True )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True )
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2 ) , batch_size=4 , drop_last=False )
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True )
        batch_sampler = BatchSampler(range(2 ) , batch_size=4 , drop_last=True )
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True )
    def _lowercase ( self : Tuple ) -> Dict:
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24 ) , batch_size=3 , drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )
        batch_sampler = BatchSampler(range(24 ) , batch_size=3 , drop_last=True )
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21 ) , batch_size=3 , drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )
        batch_sampler = BatchSampler(range(21 ) , batch_size=3 , drop_last=True )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22 ) , batch_size=3 , drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )
        batch_sampler = BatchSampler(range(22 ) , batch_size=3 , drop_last=True )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )
        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20 ) , batch_size=3 , drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )
        batch_sampler = BatchSampler(range(20 ) , batch_size=3 , drop_last=True )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2 ) , batch_size=3 , drop_last=False )
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )
        batch_sampler = BatchSampler(range(2 ) , batch_size=3 , drop_last=True )
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )
    def _lowercase ( self : str ) -> Optional[Any]:
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24 ) , batch_size=4 , drop_last=False )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True , even_batches=False )
        batch_sampler = BatchSampler(range(24 ) , batch_size=4 , drop_last=True )
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True , even_batches=False )
        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22 ) , batch_size=4 , drop_last=False )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True , even_batches=False )
        batch_sampler = BatchSampler(range(22 ) , batch_size=4 , drop_last=True )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True , even_batches=False )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21 ) , batch_size=4 , drop_last=False )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True , even_batches=False )
        batch_sampler = BatchSampler(range(21 ) , batch_size=4 , drop_last=True )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True , even_batches=False )
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2 ) , batch_size=4 , drop_last=False )
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True , even_batches=False )
        batch_sampler = BatchSampler(range(2 ) , batch_size=4 , drop_last=True )
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True , even_batches=False )
    def _lowercase ( self : Optional[int] ) -> List[Any]:
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler , 2 , i , even_batches=False ) for i in range(2 )]
        self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
        self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
        self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
        self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards( self , dataset , seed , batch_size , drop_last=False , num_processes=2 , split_batches=False ):
        random.seed(seed )
        reference = list(dataset )
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset , batch_size=batch_size , drop_last=drop_last , num_processes=num_processes , process_index=i , split_batches=split_batches , )
            for i in range(num_processes )
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed )
            iterable_dataset_lists.append(list(iterable_dataset_shard ) )
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l ) , len(first_list ) )
        self.assertTrue(len(first_list ) % shard_batch_size == 0 )
        observed = []
        for idx in range(0 , len(first_list ) , shard_batch_size ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference ) < len(observed ):
                reference += reference
        self.assertListEqual(observed , reference[: len(observed )] )
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
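
# A minimal usage sketch (not part of the original tests): `skip_first_batches` is
# the hook for resuming training mid-epoch. Dataset and batch size are illustrative.
if __name__ == "__main__":
    loader = DataLoader(list(range(16)), batch_size=4)
    resumed = skip_first_batches(loader, num_batches=2)
    for batch in resumed:
        print(batch.tolist())  # [8, 9, 10, 11] then [12, 13, 14, 15]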
| 369 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImg2ImgPipeline,
UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusion2PipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_default_ddim(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
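
# A small helper distilled from the assertions above (a sketch, not diffusers API):
# compare a 3x3 corner slice of two generated image batches instead of full arrays.
def slices_match(image_a, image_b, tol=1e-4):
    # images are (batch, height, width, channels) numpy arrays
    slice_a = image_a[0, -3:, -3:, -1].flatten()
    slice_b = image_b[0, -3:, -3:, -1].flatten()
    return np.abs(slice_a - slice_b).max() < tol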
| 324 | 0 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
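
# Usage sketch for the availability helpers re-exported above: guard optional
# backends before importing them (pattern only; `is_torch_available` is real).
if is_torch_available():
    import torch  # safe: the torch backend is installed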
| 113 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
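
# Sketch of the token-type convention implemented above (illustrative lengths):
# [CLS] gets its own type id (2); segment A (+[SEP]) is 0, segment B (+[SEP]) is 1.
def funnel_token_type_ids(len_a: int, len_b: int) -> list:
    return [2] + [0] * (len_a + 1) + [1] * (len_b + 1)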
| 113 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    '''simple docstring'''

    def __init__(self, keywords: list[str]):
        """simple docstring"""
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []})
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()
    def find_next_state(self, current_state: int, char: str):
        """simple docstring"""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword(self, keyword: str):
        """simple docstring"""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)
    def set_fail_transitions(self):
        """simple docstring"""
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"])
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )
    def search_in(self, string: str):
        """simple docstring"""
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
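
# Usage sketch for the automaton above (keywords and text are illustrative):
# build the trie and failure links once, then scan the text in a single pass.
#
#   matcher = Automaton(["what", "hat", "ver", "er"])
#   matcher.search_in("whatever, err ... , wherever")
#   # -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}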
| 61 |
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    '''simple docstring'''
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    '''simple docstring'''
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main():
'''simple docstring'''
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
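
# For completeness, a minimal extended-Euclid sketch (not part of the original):
# returns (g, x, y) with a*x + b*y == g == gcd(a, b).
def extended_euclid(a: int, b: int) -> tuple:
    if b == 0:
        return a, 1, 0
    g, x, y = extended_euclid(b, a % b)
    return g, y, x - (a // b) * y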
| 61 | 1 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 277 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('test')
    else:
        parser = argparse.ArgumentParser('Accelerate test command')
    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['test_utils', 'scripts', 'test_script.py'])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"
    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!')
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
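
# Sketch of how the `test` subcommand above plugs into a parent CLI (parent parser
# name is illustrative); equivalent to running `accelerate test` from the shell.
#
#   parent = argparse.ArgumentParser("accelerate")
#   subparsers = parent.add_subparsers()
#   test_command_parser(subparsers)   # registers the `test` subcommand
#   args = parent.parse_args(["test"])
#   args.func(args)                   # dispatches to test_command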
| 50 | 0 |
'''simple docstring'''
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """simple docstring"""
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)
def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """simple docstring"""
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """simple docstring"""
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
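
# Usage sketch (the adjacency matrix is the standard 5-vertex example): the graph
# below contains the Hamiltonian cycle 0 -> 1 -> 2 -> 4 -> 3 -> 0.
if __name__ == "__main__":
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))  # [0, 1, 2, 4, 3, 0]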
| 371 |
'''simple docstring'''
import torch
def main() -> None:
    """simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 345 | 0 |
'''simple docstring'''
from __future__ import annotations
class BoyerMooreSearch:
    '''simple docstring'''
    def __init__(self, text: str, pattern: str):
        '''simple docstring'''
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)
    def match_in_pattern(self, char: str) -> int:
        '''simple docstring'''
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text(self, current_pos: int) -> int:
        '''simple docstring'''
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic(self) -> list[int]:
        '''simple docstring'''
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = 'ABAABA'
pattern = 'AB'
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
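
# For contrast, a table-driven bad-character shift (a Boyer-Moore-Horspool sketch,
# not part of the class above): precompute the shift for every pattern character.
def horspool_shifts(pat: str) -> dict:
    m = len(pat)
    # characters absent from the pattern shift by the full pattern length m;
    # others shift by their distance to the pattern's last position
    return {pat[i]: m - 1 - i for i in range(m - 1)}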
| 134 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, """html.parser""")
    div = soup.find("""div""", attrs={"""class""": """gs_ri"""})
    anchors = div.find("""div""", attrs={"""class""": """gs_fl"""}).find_all("""a""")
    return anchors[2].get_text()
if __name__ == "__main__":
__snake_case : Optional[Any] = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
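
# Hardened variant of the request above (a sketch): pass a timeout and a User-Agent
# header so slow or bot-filtered responses fail fast instead of hanging:
#
#   requests.get(url, params=params, headers={"User-Agent": "Mozilla/5.0"}, timeout=10)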
| 134 | 1 |
UpperCAmelCase : Union[str, Any] = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCAmelCase : Dict = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase : Optional[int] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 370 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """simple docstring"""
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 148 | 0 |
"""simple docstring"""
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """)
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """)
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """)
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """)
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """)
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records)
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
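
# The converters hook exercised above is plain pandas.read_csv plumbing; a
# standalone sketch without pytest (column name illustrative):
#
#   import pandas as pd
#   from io import StringIO
#   df = pd.read_csv(StringIO("int_list\n1 2 3\n4 5 6\n"),
#                    converters={"int_list": lambda x: [int(i) for i in x.split()]})
#   df["int_list"].tolist()  # [[1, 2, 3], [4, 5, 6]]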
| 160 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn'
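
# Sketch of the env-override pattern used for HF_MODULES_CACHE above (names
# illustrative, not diffusers API):
#
#   cache_dir = os.getenv("MY_TOOL_CACHE", os.path.join(hf_cache_home, "my_tool"))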
| 324 | 0 |
"""simple docstring"""
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
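
# Equivalent one-liner for the routine above via the standard library:
#
#   from itertools import combinations
#   list(combinations([10, 20, 30, 40, 50], 3))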
| 356 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
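
# Effect of the _LazyModule indirection above (sketch): the torch-backed classes
# are only imported when first accessed, so this stays cheap:
#
#   from transformers import FocalNetConfig  # no torch import triggered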
| 11 | 0 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
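
# Reference pattern distilled from the test above (a sketch): under DDP, wrap all
# but the last micro-batch in `no_sync` to postpone the gradient all-reduce:
#
#   for step, (x, y) in enumerate(batches):
#       ctx = ddp_model.no_sync() if step < len(batches) - 1 else contextlib.nullcontext()
#       with ctx:
#           accelerator.backward(loss_fn(ddp_model(x), y))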
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
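# Illustrative only: a test module like this is normally executed across processes,
# e.g. with `accelerate launch --num_processes 2 test_sync.py`; the launcher flags
# here are an assumption about the caller, not part of this file.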
| 61 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jitted_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
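# Note on the JIT test above: jax.disable_jit() re-runs the same traced function
# eagerly, so comparing output shapes between the jitted and non-jitted calls is
# the usual sanity check that tracing did not change the model's behavior.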
| 61 | 1 |
def is_palindrome(num: int) -> bool:
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            num = sum_reverse(num)
            iterations += 1
            if is_palindrome(num):
                break
        else:
            lychrel_nums.append(a)
    return len(lychrel_nums)
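# Quick sanity sketch, using the example from the Project Euler #55 statement:
# 349 + 943 = 1292, 1292 + 2921 = 4213, 4213 + 3124 = 7337 (a palindrome),
# so 349 resolves within three iterations, while 196 is the classic Lychrel candidate.
assert sum_reverse(349) == 1292 and is_palindrome(7337)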
if __name__ == "__main__":
print(F"""{solution() = }""")
| 49 |
from __future__ import annotations
class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
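# Hand-checked expectations for the tree built in main() above: is_full_binary_tree(tree)
# is False (node 5 has a single child), depth_of_tree(tree) is 5, and the in-order
# traversal prints 4 2 6 5 1 8 9 7 3.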
if __name__ == "__main__":
main()
| 49 | 1 |
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total_value = max_face_number * dice_number
    totals_frequencies = [0] * (max_total_value + 1)

    min_face_number = 1
    dice_range = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(dice_range, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
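# Minimal sanity sketch (not from the original file): with two fair four-sided
# dice the most frequent total is 5, which the distribution helper should reflect.
_freqs = total_frequency_distribution(sides_number=4, dice_number=2)
assert _freqs.index(max(_freqs)) == 5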
if __name__ == "__main__":
print(F'''{solution() = }''')
| 220 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 345 | 0 |
'''simple docstring'''
g = 9.80665  # standard gravitational acceleration, m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Calculate the buoyant force on an object: F = fluid density * gravity * displaced volume."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
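# Quick numeric check (assumed values: fresh water at ~997 kg/m^3 displacing 0.5 m^3):
# archimedes_principle(fluid_density=997, volume=0.5) ~= 4888.6 N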
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 123 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
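# Note: with the lazy-module pattern above, the listed submodules are imported on
# first attribute access, so e.g. `Swinv2Config` only triggers the configuration
# module import at the point where it is actually used.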
| 123 | 1 |
import sys
_A = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number `n` that have
    the greatest product, and return that product."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
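# Sanity sketch from the Project Euler #8 statement: for a window of four adjacent
# digits, the greatest product in this series is 9 * 9 * 8 * 9 = 5832.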
if __name__ == "__main__":
print(f"""{solution() = }""")
| 231 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
__A = "base_with_context"
def load_notes_encoder(weights, model):
snake_case : Dict = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
snake_case : Tuple = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
ly_weight = weights[f"layers_{lyr_num}"]
snake_case : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
snake_case : List[Any] = ly_weight["attention"]
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
snake_case : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
snake_case : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
snake_case : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder(weights, model):
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
snake_case : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
ly_weight = weights[f"layers_{lyr_num}"]
snake_case : Any = ly_weight["attention"]
snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
snake_case : Dict = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
snake_case : int = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
snake_case : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
snake_case : int = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder(weights, model):
snake_case : int = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
snake_case : Tuple = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=False )
snake_case : Tuple = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
ly_weight = weights[f"layers_{lyr_num}"]
snake_case : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
snake_case : Any = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
snake_case : Union[str, Any] = ly_weight["self_attention"]
snake_case : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
snake_case : List[str] = ly_weight["MultiHeadDotProductAttention_0"]
snake_case : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
snake_case : Dict = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
snake_case : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
snake_case : Dict = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
snake_case : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
snake_case : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
snake_case : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
snake_case : List[str] = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main(args):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f'{MODEL}/checkpoint_500000',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
args = parser.parse_args()
main(args)
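# Illustrative invocation (script name and paths are placeholders, not from this repo):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path ./base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram-diffusion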
| 148 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
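# Minimal usage sketch: instantiating with the defaults above and reading a field.
# config = GPTNeoXJapaneseConfig()
# assert config.hidden_size == 2560 and config.num_hidden_layers == 32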
| 226 |
"""simple docstring"""
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """Return the number of distinct square laminae that can be formed using up
    to `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
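# Sanity sketch from the Project Euler #173 statement: with up to one hundred
# tiles, exactly forty-one different square laminae can be formed.
assert solution(100) == 41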
if __name__ == "__main__":
print(f'''{solution() = }''')
| 226 | 1 |
import os
import pytest
from attr import dataclass
__UpperCAmelCase = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
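# Hypothetical consumer (illustrative names, not part of this file): the fixture
# attaches the environment to the requesting test class, so tests read self.env.
# @pytest.mark.usefixtures("sm_env")
# class TestHuggingFaceEstimator:
#     def test_job_name(self):
#         assert self.env.base_job_name == f"{self.env.framework}-transfromers-test"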
| 29 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCAmelCase__ = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when an unavailable format type is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory function to get a Formatter given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
| 11 | 0 |
"""simple docstring"""
def dodecahedron_surface_area(edge: float) -> float:
    """Calculates the surface area of a regular dodecahedron from its edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Calculates the volume of a regular dodecahedron from its edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
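# Quick numeric check (edge of 2 units, values rounded to three decimals):
# dodecahedron_surface_area(2) ~= 82.583 and dodecahedron_volume(2) ~= 61.305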
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
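# Usage sketch (hypothetical caller; `resize`/`size`/`shape` are illustrative names):
# def resize(image, **kwargs):
#     size = deprecate("size", "99.0.0", "Use `shape` instead.", take_from=kwargs)
#     ...
# Note the removal version must still be ahead of the installed diffusers version,
# otherwise deprecate() raises instead of warning.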
| 202 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 49 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
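# Quick check of the helper above: right-padding two ragged rows to length 4 gives
# padding_tensor([[1, 2], [3]], -1, "right", 4) == [[1, 2, -1, -1], [3, -1, -1, -1]]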
def is_punctuation(char: str) -> bool:
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels, as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
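

# Editorial wiring sketch (the tokenizer and dataset names below are
# placeholders, not from the original script): the collator is meant to be
# handed to a torch DataLoader together with a LUKE tokenizer so the
# entity-level fields are padded per batch.
#
#   collator = DataCollatorForLukeTokenClassification(tokenizer=luke_tokenizer, padding="longest")
#   loader = torch.utils.data.DataLoader(train_dataset, batch_size=8, collate_fn=collator)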
| 49 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
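

# Editorial usage sketch: a small config for a univariate series with a
# 24-step horizon. The values below are illustrative, not library defaults.
#
#   config = InformerConfig(prediction_length=24, context_length=48, num_time_features=2)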
| 356 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)

torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def __a ( _UpperCamelCase: "RagExampleArguments" , _UpperCamelCase: "ProcessingArguments" , _UpperCamelCase: "IndexHnswArguments" , ) -> Dict:
"""simple docstring"""
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
_snake_case = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
_snake_case = dataset.map(_UpperCamelCase , batched=_UpperCamelCase , num_proc=processing_args.num_proc )
# And compute the embeddings
_snake_case = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_UpperCamelCase )
_snake_case = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
_snake_case = Features(
{"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
_snake_case = dataset.map(
partial(_UpperCamelCase , ctx_encoder=_UpperCamelCase , ctx_tokenizer=_UpperCamelCase ) , batched=_UpperCamelCase , batch_size=processing_args.batch_size , features=_UpperCamelCase , )
# And finally save your dataset
_snake_case = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
dataset.save_to_disk(_UpperCamelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
_snake_case = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("embeddings" , custom_index=_UpperCamelCase )
# And save the index
_snake_case = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
dataset.get_index("embeddings" ).save(_UpperCamelCase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
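

# Editorial CLI sketch (the script filename and paths are assumptions, not
# from this file): build the passages dataset and FAISS HNSW index from a
# tab-separated (title, text) file.
#
#   python use_own_knowledge_dataset.py --csv_path ./my_docs.tsv --output_dir ./my_kb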
| 142 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class a :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : Tuple=13 , lowerCamelCase : Optional[Any]=7 , lowerCamelCase : str=True , lowerCamelCase : Tuple=True , lowerCamelCase : int=False , lowerCamelCase : List[str]=True , lowerCamelCase : Optional[Any]=99 , lowerCamelCase : Optional[Any]=32 , lowerCamelCase : Optional[int]=5 , lowerCamelCase : Tuple=4 , lowerCamelCase : int=37 , lowerCamelCase : Tuple="gelu" , lowerCamelCase : str=0.1 , lowerCamelCase : List[Any]=0.1 , lowerCamelCase : List[str]=512 , lowerCamelCase : Dict=16 , lowerCamelCase : Optional[Any]=2 , lowerCamelCase : Any=0.02 , lowerCamelCase : Any=3 , lowerCamelCase : List[Any]=4 , lowerCamelCase : List[str]=None , ) -> List[str]:
__snake_case : int = parent
__snake_case : int = batch_size
__snake_case : Union[str, Any] = seq_length
__snake_case : Dict = is_training
__snake_case : Optional[int] = use_input_mask
__snake_case : List[Any] = use_token_type_ids
__snake_case : Optional[int] = use_labels
__snake_case : Dict = vocab_size
__snake_case : List[str] = hidden_size
__snake_case : int = num_hidden_layers
__snake_case : Optional[int] = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : Optional[Any] = hidden_act
__snake_case : int = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : int = max_position_embeddings
__snake_case : Optional[Any] = type_vocab_size
__snake_case : Union[str, Any] = type_sequence_label_size
__snake_case : List[str] = initializer_range
__snake_case : Any = num_labels
__snake_case : Optional[int] = num_choices
__snake_case : Any = scope
def __snake_case ( self : int ) -> Any:
__snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Any = None
if self.use_input_mask:
__snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Any = None
if self.use_token_type_ids:
__snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : Tuple = None
__snake_case : Optional[int] = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : Tuple = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self : List[Any] ) -> Dict:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
def __snake_case ( self : Any , lowerCamelCase : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : Dict , lowerCamelCase : List[Any] ) -> Union[str, Any]:
__snake_case : Optional[int] = LlamaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Optional[int] = model(lowerCamelCase , attention_mask=lowerCamelCase )
__snake_case : int = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self : Tuple , lowerCamelCase : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Optional[int] , ) -> Optional[Any]:
__snake_case : List[Any] = True
__snake_case : Any = LlamaModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Optional[int] = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
__snake_case : Optional[int] = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , )
__snake_case : List[Any] = model(lowerCamelCase , attention_mask=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , ) -> List[str]:
__snake_case : List[str] = LlamaForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Optional[Any] = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self : List[str] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : int , ) -> Dict:
__snake_case : int = True
__snake_case : Dict = True
__snake_case : List[Any] = LlamaForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
# first forward pass
__snake_case : Dict = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , )
__snake_case : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__snake_case : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case : Dict = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
__snake_case : int = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
# select random slice
__snake_case : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def __snake_case ( self : int ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__UpperCAmelCase : Dict = (LlamaForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def __snake_case ( self : List[Any] ) -> Optional[int]:
__snake_case : List[str] = LlamaModelTester(self )
__snake_case : Dict = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def __snake_case ( self : Optional[int] ) -> List[Any]:
self.config_tester.run_common_tests()
def __snake_case ( self : List[str] ) -> Optional[int]:
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __snake_case ( self : str ) -> List[Any]:
__snake_case : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case : Union[str, Any] = type
self.model_tester.create_and_check_model(*lowerCamelCase )
def __snake_case ( self : str ) -> int:
__snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[str] = 3
__snake_case : str = input_dict["input_ids"]
__snake_case : str = input_ids.ne(1 ).to(lowerCamelCase )
__snake_case : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case : Any = LlamaForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Union[str, Any] = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __snake_case ( self : Optional[int] ) -> List[str]:
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : int = 3
__snake_case : List[Any] = "single_label_classification"
__snake_case : Union[str, Any] = input_dict["input_ids"]
__snake_case : Dict = input_ids.ne(1 ).to(lowerCamelCase )
__snake_case : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__snake_case : List[Any] = LlamaForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : List[Any] = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
__snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : int = 3
__snake_case : Tuple = "multi_label_classification"
__snake_case : List[str] = input_dict["input_ids"]
__snake_case : Optional[Any] = input_ids.ne(1 ).to(lowerCamelCase )
__snake_case : Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__snake_case : List[Any] = LlamaForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : List[str] = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def __snake_case ( self : str ) -> Any:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def __snake_case ( self : Optional[int] , lowerCamelCase : int ) -> Any:
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : str = ids_tensor([1, 10] , config.vocab_size )
__snake_case : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case : List[str] = LlamaModel(lowerCamelCase )
original_model.to(lowerCamelCase )
original_model.eval()
__snake_case : Tuple = original_model(lowerCamelCase ).last_hidden_state
__snake_case : Any = original_model(lowerCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case : Any = {"type": scaling_type, "factor": 10.0}
__snake_case : Union[str, Any] = LlamaModel(lowerCamelCase )
scaled_model.to(lowerCamelCase )
scaled_model.eval()
__snake_case : int = scaled_model(lowerCamelCase ).last_hidden_state
__snake_case : Tuple = scaled_model(lowerCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-5 ) )
@require_torch
class a (unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __snake_case ( self : Any ) -> Optional[Any]:
__snake_case : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__snake_case : Optional[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
__snake_case : Dict = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__snake_case : Tuple = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__snake_case : List[str] = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __snake_case ( self : Union[str, Any] ) -> str:
__snake_case : Optional[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__snake_case : Union[str, Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
__snake_case : Optional[int] = model(torch.tensor(lowerCamelCase ) )
# Expected mean on dim = -1
__snake_case : Optional[int] = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__snake_case : Optional[Any] = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __snake_case ( self : Optional[Any] ) -> Dict:
__snake_case : Optional[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__snake_case : Tuple = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
__snake_case : List[Any] = model(torch.tensor(lowerCamelCase ) )
# Expected mean on dim = -1
__snake_case : Union[str, Any] = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__snake_case : Union[str, Any] = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase , atol=1E-2 , rtol=1E-2 )
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def __snake_case ( self : Optional[Any] ) -> Optional[Any]:
__snake_case : List[str] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__snake_case : List[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
__snake_case : int = model(torch.tensor(lowerCamelCase ) )
        __snake_case : Any = torch.tensor(
            [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.float32 )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase , atol=1E-2 , rtol=1E-2 )
# fmt: off
__snake_case : int = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Model is curently gated" )
@slow
def __snake_case ( self : Any ) -> Union[str, Any]:
__snake_case : Optional[int] = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
__snake_case : Union[str, Any] = "Simply put, the theory of relativity states that "
__snake_case : Optional[int] = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
__snake_case : Tuple = tokenizer.encode(lowerCamelCase , return_tensors="pt" )
__snake_case : List[str] = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=lowerCamelCase )
# greedy generation outputs
__snake_case : Optional[int] = model.generate(lowerCamelCase , max_new_tokens=64 , top_p=lowerCamelCase , temperature=1 , do_sample=lowerCamelCase )
__snake_case : Tuple = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
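

# Editorial note: the integration checks above are gated behind @slow; in the
# transformers test suite they are typically enabled via the RUN_SLOW flag
# (the test file path below is an assumption):
#
#   RUN_SLOW=1 pytest tests/models/llama/test_modeling_llama.py -k "Integration"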
| 123 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
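

# Editorial export sketch: the OnnxConfig above is picked up by the generic
# transformers ONNX exporter. A typical invocation (output directory is a
# placeholder; the checkpoint name is taken from the archive map above) is:
#
#   python -m transformers.onnx --model=facebook/data2vec-vision-base-ft onnx_out/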
| 123 | 1 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact, following the redirect URL returned by the API."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]

    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error, keeping the failed tests and job links."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
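

# Example shape of the returned mapping (illustrative values only):
#
# {
#     "AssertionError: Torch not compiled with CUDA enabled": {
#         "count": 12,
#         "failed_tests": [("https://github.com/.../jobs/123", "tests/models/bert/..."), ...],
#     },
# }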
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None
    return model
def reduce_by_model(logs, error_filter=None):
    """Group the errors by model and count their occurrences."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 361 |
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Greedy activity selection: prints a maximum-size set of mutually
    compatible activities. Assumes the activities are already sorted by
    finish time (non-decreasing).

    >>> print_max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9])
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity (earliest finish) is always selected
    i = 0
    print(i, end=",")

    # Consider the rest of the activities
    for j in range(n):
        # If this activity starts at or after the finish time of the
        # previously selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
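
    # Editorial note: the greedy argument only holds when activities are sorted
    # by finish time. For unsorted input, sort the pairs first, e.g.:
    #   start, finish = map(list, zip(*sorted(zip(start, finish), key=lambda p: p[1])))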
| 104 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
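

# Editorial wiring sketch (the dataset name is a placeholder): the collate
# function zero-pads the text and builds the attention mask per batch.
#
#   loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, collate_fn=collate_fn)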
def get_mmimdb_labels():
    """Return the genre labels used by the MM-IMDB dataset."""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    """ImageNet-style resize/crop/normalize pipeline with MM-IMDB statistics."""
return transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
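

# Editorial usage sketch: the normalization statistics above are the MM-IMDB
# values used by this example; applying the pipeline to a PIL image yields a
# 3x224x224 float tensor.
#
#   tensor = get_image_transforms()(Image.open("poster.jpg").convert("RGB"))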
| 226 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    dataset_kwargs = dataset_kwargs or {}
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # Wait for all ranks to write their rank_*.json files, then load them.
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
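

# Editorial launch sketch (the data path is a placeholder): the script expects
# to run under torch.distributed, e.g. on 8 GPUs:
#
#   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 --data_dir cnn_dm --save_dir out --bs 16 --fp16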
| 226 | 1 |
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 371 |
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Warn at call time that the wrapped callable is experimental and may change."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
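

# Editorial usage sketch:
#
#   @experimental
#   def new_api():
#       ...
#
# Calling new_api() then emits a UserWarning:
#   'new_api' is experimental and might be subject to breaking changes in the future.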
| 277 | 0 |