"""Fast tokenization classes for ConvBERT."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Constructs a "fast" ConvBERT tokenizer, backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the arguments passed at init time.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding special tokens: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type IDs: 0 for the first sequence (and its special tokens), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
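# --- Usage sketch (not part of the original module) ---
# A minimal example of the tokenizer above. The checkpoint name matches the
# vocab map defined in this file, but downloading it requires network access,
# so treat this as illustrative rather than a verified run:
#
#   from transformers import ConvBertTokenizerFast
#
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   enc = tokenizer("hello world", "second segment")
#   # token_type_ids are 0 over "[CLS] hello world [SEP]" and 1 over
#   # "second segment [SEP]", exactly as create_token_type_ids_from_sequences computes.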
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        # Build a batch of two identical init images in [-? , ?] pixel space.
        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in rounded_slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)

        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
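# --- Note (added) ---
# A typical way to run only the fast tests above locally would be something
# like the following; the exact test-file path inside the diffusers repo is
# an assumption here:
#
#   pytest tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py -k "FastTests"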
"""RoCBert model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
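# --- Usage sketch (not part of the original module) ---
# A minimal, illustrative example of constructing the config above with its
# defaults and overriding one field; instantiating an actual model from it
# would additionally require the RoCBert modeling code:
#
#   config = RoCBertConfig(enable_pronunciation=False)
#   print(config.model_type)        # "roc_bert"
#   print(config.shape_vocab_size)  # 24858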
import unittest

from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST


class DebertaV2ModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(
            list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]
        )

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
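# --- Usage sketch (not part of the original test file) ---
# The integration test above exercises the same API a user would call; a
# minimal, illustrative feature-extraction example (downloads the checkpoint):
#
#   from transformers import DebertaV2Model, DebertaV2Tokenizer
#
#   tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
#   model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
#   batch = tokenizer("Hello world", return_tensors="pt")
#   hidden_states = model(**batch)[0]  # (1, seq_len, hidden_size)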
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _snake_case ( __a , __a , unittest.TestCase ):
"""simple docstring"""
a = IFInpaintingSuperResolutionPipeline
a = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
a = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
a = PipelineTesterMixin.required_optional_params - {"""latents"""}
def _lowerCAmelCase ( self : str):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def _lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : List[Any]=0):
"""simple docstring"""
if str(_A).startswith("""mps"""):
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(_A)
else:
_SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=_A).manual_seed(_A)
_SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_A)).to(_A)
_SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_A)).to(_A)
_SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_A)).to(_A)
_SCREAMING_SNAKE_CASE : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _lowerCAmelCase ( self : str):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1)
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
self._test_save_load_local()
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
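# --- Usage sketch (not part of the original module) ---
# Two of the helpers re-exported above, shown in a minimal, illustrative way:
#
#   import torch
#   from accelerate.utils import send_to_device, set_seed
#
#   set_seed(42)                             # seeds python, numpy and torch RNGs
#   batch = {"x": torch.ones(2, 3)}
#   batch = send_to_device(batch, "cuda:0")  # recursively moves nested tensors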
def greatest_common_divisor(a: int, b: int) -> int:
    """Calculate the greatest common divisor (GCD) recursively, via Euclid's algorithm."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Calculate the GCD iteratively."""
    while y:  # when y becomes 0, the loop terminates and x is the final GCD
        x, y = y, x % y
    return abs(x)


def main() -> None:
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
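# Worked example (added): gcd_by_iterative(24, 40)
#   (x, y) = (24, 40) -> (40, 24) -> (24, 16) -> (16, 8) -> (8, 0)
# so both functions return 8, since gcd(24, 40) = 8.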
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
        FlaxBlenderbotSmallForConditionalGeneration,
        FlaxBlenderbotSmallModel,
        shift_tokens_right,
    )


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
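# --- Worked example (added) ---
# What test_shift_tokens_right checks, on its first row, with pad_token_id=1
# and decoder_start_token_id=2:
#
#   input:   [71, 82, 18, 33,  2, 1, 1]
#   shifted: [ 2, 71, 82, 18, 33, 2, 1]
#
# Every row now starts with the decoder start token, and exactly one padding
# token has been dropped off the end, hence the `n_pad_before - 1` assertion.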
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase ={'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase =['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase =['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
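# The `_import_structure` / `_LazyModule` dance above defers heavy imports until
# a name is first touched. A simplified sketch of the same idea using PEP 562
# module-level __getattr__ (this belongs in a package __init__.py; it is an
# illustration of the pattern, not the actual transformers implementation):
import importlib

_lazy_import_structure = {
    'processing': ['Processor'],
    'tokenization': ['Tokenizer'],
}
_name_to_module = {
    name: module for module, names in _lazy_import_structure.items() for name in names
}


def __getattr__(name):
    module_name = _name_to_module.get(name)
    if module_name is None:
        raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
    # the submodule is only imported on first attribute access
    module = importlib.import_module(f'.{module_name}', __name__)
    return getattr(module, name)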
| 446 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : Harris sensitivity factor, conventionally 0.04 or 0.06
        window_size : neighbourhood size used for the structure-tensor sums
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value')

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        """
        Returns the image with detected corners marked in red, together with
        the list of [x, y, response] triples.
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the k validated in the constructor, not a hard-coded value
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # response threshold; tune for the input image
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
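# For a quick cross-check of the hand-rolled detector above, OpenCV ships a
# built-in Harris response, cv2.cornerHarris. A minimal comparison sketch
# ("path_to_image" is a placeholder path, as in the demo above):
import cv2
import numpy as np

gray = cv2.imread("path_to_image", 0)
if gray is not None:
    response = cv2.cornerHarris(np.float32(gray), blockSize=3, ksize=3, k=0.04)
    marked = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
    # mark strong responses, mirroring the thresholding loop in HarrisCorner.detect
    marked[response > 0.01 * response.max()] = (0, 0, 255)
    cv2.imwrite("detect_builtin.png", marked)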
| 393 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """vocab.json"""}
__lowerCAmelCase = {
"""vocab_file""": {
"""mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""",
}
}
__lowerCAmelCase = {"""mgp-str""": 2_7}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : int ,_a : int ,_a : Dict="[GO]" ,_a : List[str]="[GO]" ,_a : Union[str, Any]="[s]" ,_a : Optional[Any]="[GO]" ,**_a : Dict ):
'''simple docstring'''
super().__init__(
unk_token=_a ,bos_token=_a ,eos_token=_a ,pad_token=_a ,**_a ,)
with open(_a ,encoding='utf-8' ) as vocab_handle:
_a : int = json.load(_a )
_a : Tuple = {v: k for k, v in self.vocab.items()}
@property
def __lowercase ( self : Dict ):
'''simple docstring'''
return len(self.vocab )
def __lowercase ( self : Dict ):
'''simple docstring'''
return dict(self.vocab ,**self.added_tokens_encoder )
def __lowercase ( self : Any ,_a : str ):
'''simple docstring'''
_a : Tuple = []
for s in text:
char_tokens.extend(_a )
return char_tokens
def __lowercase ( self : Any ,_a : str ):
'''simple docstring'''
return self.vocab.get(_a ,self.vocab.get(self.unk_token ) )
def __lowercase ( self : Tuple ,_a : List[str] ):
'''simple docstring'''
return self.decoder.get(_a )
def __lowercase ( self : Optional[Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error('Vocabulary path ({}) should be a directory'.format(_a ) )
return
_a : int = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
with open(_a ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.vocab ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + '\n' )
return (vocab_file,)
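# The tokenizer above is character-level: _tokenize splits the text into single
# characters, which encode/decode then map through the vocab dicts. A
# dependency-free sketch of that round trip (toy vocab; the real one is loaded
# from vocab.json):
vocab = {'[GO]': 0, '[s]': 1, 'a': 2, 'b': 3, 'c': 4}
decoder = {v: k for k, v in vocab.items()}


def tokenize(text):
    return list(text)  # one token per character


def encode(text, unk='[GO]'):
    return [vocab.get(char, vocab[unk]) for char in tokenize(text)]


ids = encode('abc')
assert ids == [2, 3, 4]
assert ''.join(decoder[i] for i in ids) == 'abc'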
| 319 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
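# The try/except OptionalDependencyNotAvailable guard above swaps in dummy
# objects when a backend is missing, so the error surfaces only on use. A
# minimal standalone version of the same idea (the dummy mechanism here is a
# simplification of what diffusers actually does; names are illustrative):
def is_torch_available():
    try:
        import torch  # noqa: F401
    except ImportError:
        return False
    return True


if is_torch_available():
    import torch
else:
    class _DummyTorch:
        # fails loudly only when something is actually accessed
        def __getattr__(self, name):
            raise ImportError('This feature requires the `torch` package.')

    torch = _DummyTorch()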
| 319 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase_ = logging.get_logger(__name__)
class __a ( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
_A : Any = "maskformer-swin"
_A : Any = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : List[str] ,_UpperCamelCase : Any=2_2_4 ,_UpperCamelCase : List[str]=4 ,_UpperCamelCase : Optional[Any]=3 ,_UpperCamelCase : str=9_6 ,_UpperCamelCase : str=[2, 2, 6, 2] ,_UpperCamelCase : str=[3, 6, 1_2, 2_4] ,_UpperCamelCase : Optional[Any]=7 ,_UpperCamelCase : Union[str, Any]=4.0 ,_UpperCamelCase : int=True ,_UpperCamelCase : List[Any]=0.0 ,_UpperCamelCase : List[Any]=0.0 ,_UpperCamelCase : Optional[int]=0.1 ,_UpperCamelCase : List[str]="gelu" ,_UpperCamelCase : Optional[Any]=False ,_UpperCamelCase : int=0.02 ,_UpperCamelCase : Tuple=1e-5 ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : List[Any]=None ,**_UpperCamelCase : List[Any] ,) -> int:
'''simple docstring'''
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =image_size
SCREAMING_SNAKE_CASE__ =patch_size
SCREAMING_SNAKE_CASE__ =num_channels
SCREAMING_SNAKE_CASE__ =embed_dim
SCREAMING_SNAKE_CASE__ =depths
SCREAMING_SNAKE_CASE__ =len(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =num_heads
SCREAMING_SNAKE_CASE__ =window_size
SCREAMING_SNAKE_CASE__ =mlp_ratio
SCREAMING_SNAKE_CASE__ =qkv_bias
SCREAMING_SNAKE_CASE__ =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ =drop_path_rate
SCREAMING_SNAKE_CASE__ =hidden_act
SCREAMING_SNAKE_CASE__ =use_absolute_embeddings
SCREAMING_SNAKE_CASE__ =layer_norm_eps
SCREAMING_SNAKE_CASE__ =initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE__ =int(embed_dim * 2 ** (len(_UpperCamelCase ) - 1) )
SCREAMING_SNAKE_CASE__ =["""stem"""] + [f"""stage{idx}""" for idx in range(1 ,len(_UpperCamelCase ) + 1 )]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =get_aligned_output_features_output_indices(
out_features=_UpperCamelCase ,out_indices=_UpperCamelCase ,stage_names=self.stage_names )
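# The config above derives its final hidden size and stage names from
# `embed_dim` and `depths`. A standalone check of that arithmetic, using the
# defaults from the __init__ signature:
embed_dim = 96
depths = [2, 2, 6, 2]

hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]

assert hidden_size == 768  # 96 * 2**3: channels double at each of the 3 downsamplings
assert stage_names == ['stem', 'stage1', 'stage2', 'stage3', 'stage4']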
| 151 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
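# The skfuzzy helpers used above reduce to element-wise numpy operations on the
# membership arrays; a dependency-free sketch of the first few, checked against
# the formulas in the comments:
import numpy as np

mu_a = np.array([0.0, 0.3, 0.7, 1.0])
mu_b = np.array([0.2, 0.5, 0.4, 0.0])

union_check = np.maximum(mu_a, mu_b)           # max(µA(x), µB(x))
intersection_check = np.minimum(mu_a, mu_b)    # min(µA(x), µB(x))
complement_check = 1 - mu_a                    # 1 - µA(x)
difference_check = np.minimum(mu_a, 1 - mu_b)  # min(µA(x), 1 - µB(x))

assert np.allclose(union_check, [0.2, 0.5, 0.7, 1.0])
assert np.allclose(intersection_check, [0.0, 0.3, 0.4, 0.0])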
| 151 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableDiffusionInpaintPipeline
A = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : int = UNetaDConditionModel(
block_out_channels=(32, 64),layers_per_block=2,sample_size=32,in_channels=9,out_channels=4,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),cross_attention_dim=32,attention_head_dim=(2, 4),use_linear_projection=_A,)
SCREAMING_SNAKE_CASE_ : Any = PNDMScheduler(skip_prk_steps=_A )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Dict = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],latent_channels=4,sample_size=128,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act="gelu",projection_dim=512,)
SCREAMING_SNAKE_CASE_ : str = CLIPTextModel(_A )
SCREAMING_SNAKE_CASE_ : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE_ : Tuple = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __UpperCamelCase ( self : Dict,_A : Tuple,_A : List[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor((1, 3, 32, 32),rng=random.Random(_A ) ).to(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = image.cpu().permute(0,2,3,1 )[0]
SCREAMING_SNAKE_CASE_ : Tuple = Image.fromarray(np.uinta(_A ) ).convert("RGB" ).resize((64, 64) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : str = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionInpaintPipeline(**_A )
SCREAMING_SNAKE_CASE_ : Any = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Dict = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : List[str] = sd_pipe(**_A ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ : Tuple = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
SCREAMING_SNAKE_CASE_ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
SCREAMING_SNAKE_CASE_ : Optional[int] = "stabilityai/stable-diffusion-2-inpainting"
SCREAMING_SNAKE_CASE_ : Any = StableDiffusionInpaintPipeline.from_pretrained(_A,safety_checker=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ : List[str] = "Face of a yellow cat, high resolution, sitting on a park bench"
SCREAMING_SNAKE_CASE_ : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(
prompt=_A,image=_A,mask_image=_A,generator=_A,output_type="np",)
SCREAMING_SNAKE_CASE_ : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
SCREAMING_SNAKE_CASE_ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
SCREAMING_SNAKE_CASE_ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
SCREAMING_SNAKE_CASE_ : Dict = "stabilityai/stable-diffusion-2-inpainting"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = StableDiffusionInpaintPipeline.from_pretrained(
_A,torch_dtype=torch.floataa,safety_checker=_A,)
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ : str = "Face of a yellow cat, high resolution, sitting on a park bench"
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Any = pipe(
prompt=_A,image=_A,mask_image=_A,generator=_A,output_type="np",)
SCREAMING_SNAKE_CASE_ : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE_ : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
SCREAMING_SNAKE_CASE_ : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting"
SCREAMING_SNAKE_CASE_ : str = PNDMScheduler.from_pretrained(_A,subfolder="scheduler" )
SCREAMING_SNAKE_CASE_ : Tuple = StableDiffusionInpaintPipeline.from_pretrained(
_A,safety_checker=_A,scheduler=_A,torch_dtype=torch.floataa,)
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ : str = "Face of a yellow cat, high resolution, sitting on a park bench"
SCREAMING_SNAKE_CASE_ : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : int = pipe(
prompt=_A,image=_A,mask_image=_A,generator=_A,num_inference_steps=2,output_type="np",)
SCREAMING_SNAKE_CASE_ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
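# The tests above pin randomness two ways: dummy inputs come from a seeded
# random.Random, and diffusion noise from a seeded torch.Generator. That is
# what makes the expected_slice assertions reproducible. A standalone sketch:
import random

import numpy as np
import torch


def seeded_image(seed, shape=(1, 3, 32, 32)):
    rng = random.Random(seed)
    values = [rng.random() for _ in range(int(np.prod(shape)))]
    return torch.tensor(values).reshape(shape)


assert torch.equal(seeded_image(0), seeded_image(0))  # same seed -> identical input

generator = torch.Generator(device='cpu').manual_seed(0)
noise_1 = torch.randn((2, 2), generator=generator)
generator.manual_seed(0)
noise_2 = torch.randn((2, 2), generator=generator)
assert torch.equal(noise_1, noise_2)  # same seed -> identical noise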
| 316 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a__ ( A__ , unittest.TestCase ):
A = UnCLIPImageVariationPipeline
A = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
A = IMAGE_VARIATION_BATCH_PARAMS
A = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
A = False
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=self.text_embedder_hidden_size,projection_dim=self.text_embedder_hidden_size,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,)
return CLIPTextModelWithProjection(_A )
@property
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : int = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size,projection_dim=self.text_embedder_hidden_size,num_hidden_layers=5,num_attention_heads=4,image_size=32,intermediate_size=37,patch_size=1,)
return CLIPVisionModelWithProjection(_A )
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = {
"clip_embeddings_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"cross_attention_dim": self.cross_attention_dim,
}
SCREAMING_SNAKE_CASE_ : str = UnCLIPTextProjModel(**_A )
return model
@property
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"sample_size": 32,
# RGB in channels
"in_channels": 3,
# Out channels is double in channels because predicts mean and variance
"out_channels": 6,
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": "identity",
}
SCREAMING_SNAKE_CASE_ : int = UNetaDConditionModel(**_A )
return model
@property
def __UpperCamelCase ( self : int ):
"""simple docstring"""
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_decoder
SCREAMING_SNAKE_CASE_ : str = self.dummy_text_proj
SCREAMING_SNAKE_CASE_ : str = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_tokenizer
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_super_res_first
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_super_res_last
SCREAMING_SNAKE_CASE_ : Dict = UnCLIPScheduler(
variance_type="learned_range",prediction_type="epsilon",num_train_timesteps=1000,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = UnCLIPScheduler(
variance_type="fixed_small_log",prediction_type="epsilon",num_train_timesteps=1000,)
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPImageProcessor(crop_size=32,size=32 )
SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def __UpperCamelCase ( self : Any,_A : Dict,_A : Any=0,_A : str=True ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = floats_tensor((1, 3, 32, 32),rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Generator(device=_A ).manual_seed(_A )
if pil_image:
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE_ : List[Any] = input_image.clamp(0,1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = input_image.cpu().permute(0,2,3,1 ).float().numpy()
SCREAMING_SNAKE_CASE_ : List[str] = DiffusionPipeline.numpy_to_pil(_A )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = "cpu"
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.pipeline_class(**_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(_A,pil_image=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(_A,pil_image=_A )
SCREAMING_SNAKE_CASE_ : Any = pipe(
**_A,return_dict=_A,)[0]
SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ : Tuple = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
SCREAMING_SNAKE_CASE_ : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[Any] = self.pipeline_class(**_A )
SCREAMING_SNAKE_CASE_ : Tuple = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(_A,pil_image=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**_A )
SCREAMING_SNAKE_CASE_ : int = output.images
SCREAMING_SNAKE_CASE_ : Dict = self.get_dummy_inputs(_A,pil_image=_A )
SCREAMING_SNAKE_CASE_ : List[str] = pipe(
**_A,return_dict=_A,)[0]
SCREAMING_SNAKE_CASE_ : Dict = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ : str = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "cpu"
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.pipeline_class(**_A )
SCREAMING_SNAKE_CASE_ : Tuple = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_dummy_inputs(_A,pil_image=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = [
pipeline_inputs["image"],
pipeline_inputs["image"],
]
SCREAMING_SNAKE_CASE_ : List[str] = pipe(**_A )
SCREAMING_SNAKE_CASE_ : Any = output.images
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_inputs(_A,pil_image=_A )
SCREAMING_SNAKE_CASE_ : List[str] = [
tuple_pipeline_inputs["image"],
tuple_pipeline_inputs["image"],
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe(
**_A,return_dict=_A,)[0]
SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
SCREAMING_SNAKE_CASE_ : Dict = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = torch.device("cpu" )
class a__ :
A = 1
SCREAMING_SNAKE_CASE_ : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any = self.pipeline_class(**_A )
SCREAMING_SNAKE_CASE_ : Tuple = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.Generator(device=_A ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Dict = pipe.decoder.dtype
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1
SCREAMING_SNAKE_CASE_ : List[Any] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
SCREAMING_SNAKE_CASE_ : Any = pipe.prepare_latents(
_A,dtype=_A,device=_A,generator=_A,latents=_A,scheduler=DummyScheduler() )
SCREAMING_SNAKE_CASE_ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
SCREAMING_SNAKE_CASE_ : Any = pipe.prepare_latents(
_A,dtype=_A,device=_A,generator=_A,latents=_A,scheduler=DummyScheduler() )
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs(_A,pil_image=_A )
        img_out_1 = pipe(
            **_A,decoder_latents=_A,super_res_latents=_A ).images
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_inputs(_A,pil_image=_A )
        # Don't pass image, instead pass embedding
        SCREAMING_SNAKE_CASE_ : str = pipeline_inputs.pop("image" )
        SCREAMING_SNAKE_CASE_ : List[str] = pipe.image_encoder(_A ).image_embeds
        img_out_2 = pipe(
            **_A,decoder_latents=_A,super_res_latents=_A,image_embeddings=_A,).images
        # make sure passing the image embeddings manually gives an identical result
        assert np.abs(img_out_1 - img_out_2 ).max() < 1E-4
@skip_mps
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = torch_device == "cpu"
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
SCREAMING_SNAKE_CASE_ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=_A,expected_max_diff=_A )
@skip_mps
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch_device == "cpu"
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Dict = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
self._test_inference_batch_single_identical(
test_max_difference=_A,relax_max_difference=_A,additional_params_copy_to_batched_inputs=_A,)
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
SCREAMING_SNAKE_CASE_ : List[Any] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=_A,additional_params_copy_to_batched_inputs=_A,)
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=_A )
@skip_mps
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
SCREAMING_SNAKE_CASE_ : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
SCREAMING_SNAKE_CASE_ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
"kakaobrain/karlo-v1-alpha-image-variations",torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ : Any = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = pipeline(
_A,generator=_A,output_type="np",)
SCREAMING_SNAKE_CASE_ : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(_A,_A,15 )
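# `assert_mean_pixel_difference(image, expected, 15)` above tolerates small
# per-pixel drift between a generated image and its reference. A sketch of the
# underlying check (threshold in 0-255 pixel units; the exact upstream
# implementation may differ in detail):
import numpy as np


def mean_pixel_difference_ok(image, expected, threshold=15):
    image = np.asarray(image, dtype=np.float32)
    expected = np.asarray(expected, dtype=np.float32)
    return np.abs(image - expected).mean() < threshold


reference = np.full((4, 4, 3), 128, dtype=np.uint8)
generated = reference + np.random.randint(0, 5, size=reference.shape).astype(np.uint8)
assert mean_pixel_difference_ok(generated, reference)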
| 316 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 |
"""simple docstring"""
from collections import defaultdict


class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask, all persons have been assigned a task
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # number of ways when we don't assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
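# A hand-check of what the bitmask encodes: bit p of `mask` is 1 once person p
# already holds a task. With M = 3 persons, final_mask = 0b111:
persons = [[1, 3, 4], [1, 2, 5], [3, 4]]
final_mask = (1 << len(persons)) - 1
assert final_mask == 0b111

mask = 0b001                # person 0 already assigned
assert not mask & (1 << 1)  # person 1 is still free
mask |= 1 << 1              # assign the current task to person 1
assert mask == 0b011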
| 363 | 1 |
'''simple docstring'''
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum sum over subsets of non-adjacent elements of `nums`.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
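# Brute-force cross-check of maximum_non_adjacent_sum over all index subsets
# (exponential, for illustration on small inputs only):
from itertools import combinations


def brute_force_non_adjacent_sum(nums):
    best = 0
    for r in range(1, len(nums) + 1):
        for idx in combinations(range(len(nums)), r):
            if all(b - a > 1 for a, b in zip(idx, idx[1:])):
                best = max(best, sum(nums[i] for i in idx))
    return best


for case in ([1, 2, 3], [1, 5, 3, 7, 2, 2, 6], [-1, -2, -3]):
    assert maximum_non_adjacent_sum(case) == brute_force_non_adjacent_sum(case)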
| 542 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __UpperCAmelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=7 , lowerCAmelCase_=3 , lowerCAmelCase_=18 , lowerCAmelCase_=30 , lowerCAmelCase_=4_00 , lowerCAmelCase_=True , lowerCAmelCase_=32 , lowerCAmelCase_=True , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = num_channels
_snake_case = image_size
_snake_case = min_resolution
_snake_case = max_resolution
_snake_case = do_resize
_snake_case = size_divisor
_snake_case = do_rescale
def lowerCamelCase ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
__lowercase = GLPNImageProcessor if is_vision_available() else None
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = GLPNImageProcessingTester(self )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'size_divisor' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'resample' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_rescale' ) )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
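# GLPNImageProcessor rounds each spatial dimension down to a multiple of
# `size_divisor`, which is exactly what the shape % size_divisor == 0
# assertions above verify. The core arithmetic, stand-alone:
def round_down_to_divisor(height, width, size_divisor=32):
    return (height // size_divisor) * size_divisor, (width // size_divisor) * size_divisor


assert round_down_to_divisor(400, 300) == (384, 288)
assert round_down_to_divisor(480, 640) == (480, 640)  # already multiples of 32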
| 542 | 1 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> int:
debug_launcher(test_script.main )
def _lowerCamelCase ( self ) -> Dict:
debug_launcher(test_ops.main )
| 76 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'Speech2TextFeatureExtractor'
UpperCamelCase : Optional[Any] = 'Speech2TextTokenizer'
def __init__( self : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =self.feature_extractor
SCREAMING_SNAKE_CASE_: List[Any] =False
def __call__( self : Dict , *lowerCAmelCase : str , **lowerCAmelCase : str ) -> str:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase , **lowerCAmelCase )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
SCREAMING_SNAKE_CASE_: Tuple =kwargs.pop("""raw_speech""" )
else:
SCREAMING_SNAKE_CASE_: int =kwargs.pop("""audio""" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =kwargs.pop("""sampling_rate""" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =kwargs.pop("""text""" , lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
SCREAMING_SNAKE_CASE_: List[str] =args[0]
SCREAMING_SNAKE_CASE_: List[str] =args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
SCREAMING_SNAKE_CASE_: Optional[int] =self.feature_extractor(lowerCAmelCase , *lowerCAmelCase , sampling_rate=lowerCAmelCase , **lowerCAmelCase )
if text is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.tokenizer(lowerCAmelCase , **lowerCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE_: Any =encodings["""input_ids"""]
return inputs
def lowerCamelCase__ ( self : Any , *lowerCAmelCase : Any , **lowerCAmelCase : Tuple ) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : Tuple , *lowerCAmelCase : Any , **lowerCAmelCase : int ) -> Any:
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@contextmanager
def lowerCamelCase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =True
SCREAMING_SNAKE_CASE_: Dict =self.tokenizer
yield
SCREAMING_SNAKE_CASE_: int =self.feature_extractor
SCREAMING_SNAKE_CASE_: str =False
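# The deprecated `as_target_processor` above is just a temporary swap of which
# sub-processor services __call__, restored on exit. The pattern in isolation
# (len and str.upper stand in for the feature extractor and tokenizer):
from contextlib import contextmanager


class ProcessorSketch:
    def __init__(self, feature_extractor, tokenizer):
        self.feature_extractor = feature_extractor
        self.tokenizer = tokenizer
        self.current_processor = feature_extractor

    def __call__(self, data):
        return self.current_processor(data)

    @contextmanager
    def as_target_processor(self):
        self.current_processor = self.tokenizer
        try:
            yield
        finally:
            self.current_processor = self.feature_extractor


processor = ProcessorSketch(feature_extractor=len, tokenizer=str.upper)
assert processor('abc') == 3
with processor.as_target_processor():
    assert processor('abc') == 'ABC'
assert processor('abc') == 3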
| 409 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_lowercase = logging.get_logger(__name__)
_lowercase = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
_lowercase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f""".{module_name}""" , """transformers.models""" )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , """__name__""" , None ) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("""transformers""" )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )

    return None
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , **_UpperCamelCase , ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] = get_file_from_repo(
_UpperCamelCase , _UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , resume_download=_UpperCamelCase , proxies=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , local_files_only=_UpperCamelCase , )
if resolved_config_file is None:
logger.info(
"""Could not locate the image processor configuration file, will try to use the model config instead.""" )
return {}
with open(_UpperCamelCase , encoding="""utf-8""" ) as reader:
return json.load(_UpperCamelCase )
class lowerCamelCase__ :
def __init__( self : int ):
'''simple docstring'''
raise EnvironmentError(
"""AutoImageProcessor is designed to be instantiated """
"""using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(__a )
def lowerCamelCase_ ( cls : Dict , __a : Optional[int] , **__a : List[str] ):
'''simple docstring'''
lowerCamelCase__: int = kwargs.pop("""config""" , __a )
lowerCamelCase__: int = kwargs.pop("""trust_remote_code""" , __a )
lowerCamelCase__: Tuple = True
lowerCamelCase__: Tuple = ImageProcessingMixin.get_image_processor_dict(__a , **__a )
lowerCamelCase__: List[Any] = config_dict.get("""image_processor_type""" , __a )
lowerCamelCase__: Optional[Any] = None
if "AutoImageProcessor" in config_dict.get("""auto_map""" , {} ):
lowerCamelCase__: Optional[int] = config_dict["""auto_map"""]["""AutoImageProcessor"""]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
lowerCamelCase__: Any = config_dict.pop("""feature_extractor_type""" , __a )
if feature_extractor_class is not None:
logger.warning(
"""Could not find image processor class in the image processor config or the model config. Loading"""
""" based on pattern matching with the model's feature extractor configuration.""" )
lowerCamelCase__: Optional[int] = feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""" )
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
lowerCamelCase__: Dict = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
lowerCamelCase__: str = feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""" )
logger.warning(
"""Could not find image processor auto map in the image processor config or the model config."""
""" Loading based on pattern matching with the model's feature extractor configuration.""" )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(__a , __a ):
lowerCamelCase__: Optional[Any] = AutoConfig.from_pretrained(__a , **__a )
# It could be in `config.image_processor_type``
lowerCamelCase__: str = getattr(__a , """image_processor_type""" , __a )
if hasattr(__a , """auto_map""" ) and "AutoImageProcessor" in config.auto_map:
lowerCamelCase__: Union[str, Any] = config.auto_map["""AutoImageProcessor"""]
if image_processor_class is not None:
lowerCamelCase__: int = image_processor_class_from_name(__a )
lowerCamelCase__: Dict = image_processor_auto_map is not None
lowerCamelCase__: List[Any] = image_processor_class is not None or type(__a ) in IMAGE_PROCESSOR_MAPPING
lowerCamelCase__: List[Any] = resolve_trust_remote_code(
__a , __a , __a , __a )
if has_remote_code and trust_remote_code:
lowerCamelCase__: Union[str, Any] = get_class_from_dynamic_module(
__a , __a , **__a )
lowerCamelCase__: Optional[Any] = kwargs.pop("""code_revision""" , __a )
if os.path.isdir(__a ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(__a , **__a )
elif image_processor_class is not None:
return image_processor_class.from_dict(__a , **__a )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(__a ) in IMAGE_PROCESSOR_MAPPING:
lowerCamelCase__: Any = IMAGE_PROCESSOR_MAPPING[type(__a )]
return image_processor_class.from_dict(__a , **__a )
raise ValueError(
f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def lowerCamelCase_ ( __a : str , __a : Optional[int] ):
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(__a , __a )
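# At its core, AutoImageProcessor.from_pretrained is a registry lookup: map a
# model-type string to a processor class name, resolve that class, and
# instantiate it. A toy version of the dispatch (class names are made up):
REGISTRY = {'clip': 'ClipProcessorSketch', 'vit': 'VitProcessorSketch'}


class ClipProcessorSketch:
    pass


class VitProcessorSketch:
    pass


def from_model_type(model_type):
    class_name = REGISTRY.get(model_type)
    if class_name is None:
        raise ValueError(f'Unrecognized model type: {model_type!r}')
    # stand-in for the importlib-based resolution in image_processor_class_from_name
    return globals()[class_name]()


assert isinstance(from_model_type('clip'), ClipProcessorSketch)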
| 705 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_save_directory(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_attention_outputs(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
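    # Usage note (assuming the usual transformers test layout, which this file
    # appears to follow): the module can be run on its own with, e.g.,
    #   pytest tests/models/timm_backbone/test_modeling_timm_backbone.py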
| 242 | 0 |
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
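# Illustrative example (values are arbitrary, not from the original script): for a
# 3 s clip sampled at 16 kHz, random_subsample(wav, max_length=1.0) returns a random
# 16000-sample (1 s) window; clips shorter than `max_length` are returned unchanged.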
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f'''{", ".join(raw_datasets["train"].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"Make sure to set `--label_column_name` to the correct text column - one of "
f'''{", ".join(raw_datasets["train"].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir,
        revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels), label2id=label2id, id2label=id2label,
        finetuning_task="audio-classification", cache_dir=model_args.cache_dir,
        revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
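# Example launch, mirroring the usual audio-classification example setup
# (dataset choice and output path are illustrative):
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval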
| 449 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Pressure from the ideal gas law: p = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Volume from the ideal gas law: V = nRT / p."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
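# Worked example (illustrative; not part of the original module): one mole of an
# ideal gas at 300 K in a volume of 0.025 m^3 exerts
#   p = nRT / V = 1 * 300 * 8.314462 / 0.025 ≈ 99774 Pa,
# i.e. pressure_of_gas_system(1.0, 300.0, 0.025) is roughly atmospheric pressure.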
if __name__ == "__main__":
from doctest import testmod
testmod()
| 449 | 1 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Gaussian probability density with mean `mu` and standard deviation `sigma`."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
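# Sanity check (illustrative): the standard normal density peaks at
# gaussian(0.0) = 1 / sqrt(2 * pi) ≈ 0.3989, and gaussian(mu, mu, sigma) is
# the maximum of any such curve.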
if __name__ == "__main__":
import doctest
doctest.testmod()
| 133 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
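# Usage sketch (assumes the conventions of this sagemaker test suite): a test
# class sets a `framework` attribute and opts into the fixture, after which the
# environment is available as `self.env`:
#
#   @pytest.mark.usefixtures("sm_env")
#   class MyTrainingTest(unittest.TestCase):
#       framework = "pytorch"
#
#       def test_hyperparameters(self):
#           assert self.env.hyperparameters["task_name"] == "mnli"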
| 133 | 1 |
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter.lower()] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
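# Illustrative usage (a sketch — very short inputs make the chi-squared statistic
# noisy, so the recovered shift is only reliable for longer English text):
# "hello world" shifted by 3 is "khoor zruog", and decryption should undo it:
#
#   shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("khoor zruog")
#   # expected for English letter frequencies: shift == 3, decoded == "hello world"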
| 130 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2,
            block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2,
            block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2,
            block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2,
            block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 130 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100,
        encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8,
        decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True,
        activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False,
        position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True,
        dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1,
        dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2,
        eos_coefficient=0.1, **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
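# Minimal usage sketch (illustrative): the attribute_map above means the generic
# config names resolve to table-transformer specific ones, e.g.
#   config = TableTransformerConfig()
#   assert config.hidden_size == config.d_model == 256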
| 203 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
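    # Example invocation (paths are placeholders, not real checkpoints):
    #   python convert_clap_original_pytorch_to_hf.py \
    #       --checkpoint_path /path/to/clap_checkpoint.pt \
    #       --pytorch_dump_folder_path ./clap-hf [--enable_fusion]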
| 203 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
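# Note: with this lazy module in place,
# `from transformers.models.unispeech import UniSpeechModel` only imports the
# torch-backed modeling code on first attribute access, keeping the top-level
# `import transformers` cheap.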
| 259 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
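# Minimal usage sketch (illustrative): instantiating the config with defaults
# mirrors the van-base layout, e.g.
#   config = VanConfig()
#   assert config.hidden_sizes == [64, 128, 320, 512]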
| 259 | 1 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        sequences = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 719 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm, causal=self.causal, n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
            summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            'feature-extraction': TFFlaubertModel,
            'fill-mask': TFFlaubertWithLMHeadModel,
            'question-answering': TFFlaubertForQuestionAnsweringSimple,
            'text-classification': TFFlaubertForSequenceClassification,
            'token-classification': TFFlaubertForTokenClassification,
            'zero-shot': TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]], dtype=tf.int32, )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8_768_773, -1.566_555, 0.27_072_418],
                    [-1.6_920_038, -0.5_873_505, 1.9_329_599],
                    [-2.9_563_985, -1.6_993_835, 1.7_972_052],
                ]
            ], dtype=tf.float32, )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
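# A minimal sketch of how this module is typically exercised (the test file path is
# an assumption based on the usual transformers layout; RUN_SLOW gates the
# integration test above):
#
#   RUN_SLOW=1 python -m pytest tests/models/flaubert/test_modeling_tf_flaubert.py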
| 411 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f'{orig_block_num}.{layer_num}.{original_name}', f'block.{new_block_num}.{layer_num}.{new_name}')
    return key
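# Worked example (hypothetical checkpoint key, for illustration only):
#   replace_key_with_offset("network.2.1.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
# returns "network.block.1.1.output.conv1.weight": block 2 shifts down by the
# offset of 1 while the layer index and the weight suffix are preserved.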
def rename_keys(state_dict):
    """Rename the keys of the original PoolFormer state dict to match ours."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f'patch_embeddings.{total_embed_found}.')
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Prepare a COCO image on which we will verify our results."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original weights to our PoolFormer structure."""
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 10_00
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 10_00)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 1_92, 3_84, 7_68]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 1_92, 3_84, 7_68]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.95
    else:
        raise ValueError(f'Size {size} not supported')
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info(f'Converting model {model_name}...')
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3_045, -0.6_758, -0.4_869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4_402, -0.1_374, -0.8_045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6_080, -0.5_133, -0.5_898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3_952, 0.2_263, -1.2_668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1_167, -0.0_656, -0.3_423])
    else:
        raise ValueError(f'Size {size} not supported')
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1E-2)
    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
lowerCAmelCase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
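# Example invocation (a sketch; the checkpoint file name is an assumption based on
# the naming used by the official PoolFormer releases):
#
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer_s12_hf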
| 43 |
import os

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = '.'

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = '\n'.join(non_existent_paths)
        raise ValueError(F"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')


class FlaxAutoModelForSeqaSeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='''image classification'''
)


class FlaxAutoModelForVisionaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')


class FlaxAutoModelForSpeechSeqaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
| 719 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.574_8162, 0.6044_7145, 0.4882_1217, 0.5010_0636, 0.543_1185, 0.4576_3683, 0.4965_7696, 0.4813_2733, 0.4757_3093])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5160_5093, 0.570_7241, 0.4736_5507, 0.5057_8886, 0.563_3877, 0.464_2503, 0.518_2081, 0.4876_3484, 0.4908_4237])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
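# Usage sketch outside the test harness (the checkpoint id is the one exercised by
# the integration tests above; everything else is the standard diffusers API):
#
#   from diffusers import AltDiffusionPipeline
#   pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion")
#   image = pipe("A painting of a squirrel eating a burger").images[0]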
| 455 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=("train",) ) ->int:
assert isinstance(_lowerCamelCase , _lowerCamelCase )
for split in splits:
_UpperCAmelCase =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
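# Round-trip sketch using the public API that these tests exercise:
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
#   ds.to_parquet("data.parquet")
#   reloaded = Dataset.from_parquet("data.parquet")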
| 408 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np", )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
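# The knob these tests exercise is `sag_scale` (self-attention guidance strength);
# a minimal sketch mirroring the slow tests above:
#
#   pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
#   image = pipe(".", sag_scale=1.0, guidance_scale=7.5).images[0]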
| 408 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
a ='\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
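# Give the example docstring its conventional name so the `@replace_example_docstring`
# decorator on `__call__` below can reference it (the alias is ours; the string above
# is unchanged).
EXAMPLE_DOC_STRING = a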
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
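# Worked examples: the helper returns the latent-grid size for the movq scale
# factor of 8 used by this pipeline, rounding requests up to the next multiple
# of scale_factor**2:
#   downscale_height_and_width(512, 512)  # -> (64, 64)
#   downscale_height_and_width(764, 764)  # -> (96, 96), i.e. decodes to 768x768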
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler, movq, ):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(F'''cuda:{gpu_id}''')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(F'''cuda:{gpu_id}''')
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, negative_image_embeds, hint, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True, ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 715 |
"""simple docstring"""
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
'''simple docstring'''
return number | (1 << position)
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
'''simple docstring'''
return number & ~(1 << position)
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
'''simple docstring'''
return number ^ (1 << position)
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> bool:
'''simple docstring'''
return ((number >> position) & 1) == 1
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
'''simple docstring'''
return int((number & (1 << position)) != 0 )
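# Quick hand-checkable sanity examples for the helpers above (values chosen so
# they are easy to verify in binary; 0b1101 == 13):
#     set_bit(0b1101, 1)    == 0b1111 (15)
#     clear_bit(0b1111, 1)  == 0b1101 (13)
#     flip_bit(0b1101, 1)   == 0b1111 (15)
#     is_bit_set(0b1101, 3) is True
#     get_bit(0b1101, 1)    == 0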
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 132 | 0 |
import argparse
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
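# Typical invocations from the repository root (assumed workflow):
#     python utils/check_dummies.py                      # fail if the dummy files are stale
#     python utils/check_dummies.py --fix_and_overwrite  # regenerate the dummy files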
| 1 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count all simple paths from (row, col) to the bottom-right cell of `grid`,
    moving in the four cardinal directions; cells containing 1 are blocked."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
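# Hand-checkable example: on an open 2x2 grid there are exactly two simple paths
# from (0, 0) to (1, 1), one through each off-diagonal cell:
#     depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2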
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 121 | 0 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # NOTE: several entries below are distinct Unicode space / zero-width characters
        # that may render identically (or invisibly) in some editors.
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8_203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to disable the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string, keeping special tokens intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
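# Minimal usage sketch (checkpoint id taken from the map above; availability of
# the checkpoint in your environment is assumed):
#     tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#     ids = tokenizer.encode_fast("Träd är fina", return_tensors="pt")
#     text = tokenizer.decode_fast(ids.tolist())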
| 711 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
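        # Background for the expected value: `loss` is the mean cross-entropy per
        # label token, so -(num_label_tokens * loss) recovers the summed
        # log-likelihood that the original Mesh TensorFlow implementation reports,
        # hence the name `mtf_score`.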
| 450 | 0 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}


class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
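# Sketch of composing a full config from the two sub-configs defined above
# (`from_text_vision_configs` takes plain dicts here):
#     text_cfg = OwlViTTextConfig()
#     vision_cfg = OwlViTVisionConfig()
#     config = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())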
| 305 |
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
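# Shape note for the dummy caches generated above: every past key/value tensor is
#     (batch, num_heads, past_sequence_length, hidden_size // num_heads),
# and in the seq2seq case the decoder cache is deliberately padded 3 steps longer
# than the dummy decoder input (`decoder_seq_length + 3`).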
| 305 | 1 |
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    output_text = output.stdout.decode("utf-8")
    status = json.loads(output_text)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
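# Example invocation (script name, runner names and token are placeholders):
#     python check_runners.py --target_runners runner-1,runner-2 --token <token with actions:read>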
| 710 |
import numpy as np
import torch
import tqdm

from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        # pin the conditioned timesteps of the trajectory to the given states
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
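# Rough usage sketch (assumes a d4rl-style `env` and pretrained value/unet
# checkpoints; the variable names below are illustrative, not part of this file):
#     pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
#     action = pipeline(obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1)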
| 45 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where the word is a
    tuple of symbols (symbols being variable-length strings)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
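# Example: get_pairs(("h", "e", "l", "l", "o")) returns
#     {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}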
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)

        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
| 367 |
"""
Find the maximum total from top to bottom of the triangle stored in `triangle.txt`,
where each step moves to an adjacent number on the row below (Project Euler style).
"""
import os


def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
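# How the DP works: each cell accumulates the best path sum from its (up to two)
# parents in the row above, so the answer is the maximum of the bottom row.
# E.g. for the triangle 3 / 7 4 / 2 4 6 / 8 5 9 3 the best total is 3 + 7 + 4 + 9 = 23.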
if __name__ == "__main__":
    print(solution())
| 679 | 0 |
import argparse
import json
import os
import re
import shutil

import torch

from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging


logging.set_verbosity_warning()

json_indent = 2


class Dictionary:
    """A mapping from symbols to consecutive integers (fairseq-style)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load the dictionary from a text file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g. {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
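# Example invocation (both paths and the script name are placeholders):
#     python convert_biogpt_checkpoint.py \
#         --biogpt_checkpoint_path /path/to/fairseq_biogpt \
#         --pytorch_dump_folder_path /path/to/hf_biogpt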
| 705 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCamelCase = logging.get_logger(__name__)
# General docstring
_lowerCamelCase = 'RegNetConfig'
# Base docstring
_lowerCamelCase = 'facebook/regnet-y-040'
_lowerCamelCase = [1, 1088, 7, 7]
# Image classification docstring
_lowerCamelCase = 'facebook/regnet-y-040'
_lowerCamelCase = 'tabby, tabby cat'
_lowerCamelCase = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , a__ , a__ = 3 , a__ = 1 , a__ = 1 , a__ = "relu" , **a__ , ):
"""simple docstring"""
super().__init__(**a__)
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_lowerCamelCase : List[str] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2)
_lowerCamelCase : List[str] = tf.keras.layers.ConvaD(
filters=a__ , kernel_size=a__ , strides=a__ , padding='''VALID''' , groups=a__ , use_bias=a__ , name='''convolution''' , )
_lowerCamelCase : List[str] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''')
_lowerCamelCase : Tuple = ACTaFN[activation] if activation is not None else tf.identity
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.convolution(self.padding(a__))
_lowerCamelCase : Any = self.normalization(a__)
_lowerCamelCase : Any = self.activation(a__)
return hidden_state
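# Added note (illustration, not from the original file): the explicit
# ZeroPadding2D + padding="VALID" pair in the layer above reproduces PyTorch's
# symmetric padding; tf "SAME" padding may pad asymmetrically when stride > 1,
# which would break weight-for-weight parity with PyTorch RegNet checkpoints.
# A minimal sketch of the shape equivalence for the stride == 1 case:
def _padding_equivalence_sketch():
    inputs = tf.random.normal((1, 8, 8, 3))
    kernel_size = 3
    padded = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)(inputs)
    out_valid = tf.keras.layers.Conv2D(4, kernel_size, padding="VALID")(padded)
    out_same = tf.keras.layers.Conv2D(4, kernel_size, padding="SAME")(inputs)
    # Same output shape; values differ only because the two kernels are random.
    assert out_valid.shape == out_same.shape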
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , a__ , **a__):
"""simple docstring"""
super().__init__(**a__)
_lowerCamelCase : Optional[int] = config.num_channels
_lowerCamelCase : Any = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : int = shape_list(a__)[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''')
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_lowerCamelCase : List[str] = tf.transpose(a__ , perm=(0, 2, 3, 1))
_lowerCamelCase : List[Any] = self.embedder(a__)
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , a__ , a__ = 2 , **a__):
"""simple docstring"""
super().__init__(**a__)
_lowerCamelCase : Dict = tf.keras.layers.ConvaD(
filters=a__ , kernel_size=1 , strides=a__ , use_bias=a__ , name='''convolution''')
_lowerCamelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''')
def __snake_case ( self , a__ , a__ = False):
"""simple docstring"""
return self.normalization(self.convolution(a__) , training=a__)
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , a__ , a__ , **a__):
"""simple docstring"""
super().__init__(**a__)
_lowerCamelCase : List[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a__ , name='''pooler''')
_lowerCamelCase : str = [
tf.keras.layers.ConvaD(filters=a__ , kernel_size=1 , activation='''relu''' , name='''attention.0'''),
tf.keras.layers.ConvaD(filters=a__ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2'''),
]
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : Tuple = self.pooler(a__)
for layer_module in self.attention:
_lowerCamelCase : Optional[Any] = layer_module(a__)
_lowerCamelCase : List[str] = hidden_state * pooled
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , a__ , a__ , a__ , a__ = 1 , **a__):
"""simple docstring"""
super().__init__(**a__)
_lowerCamelCase : Optional[int] = in_channels != out_channels or stride != 1
_lowerCamelCase : Optional[int] = max(1 , out_channels // config.groups_width)
_lowerCamelCase : int = (
TFRegNetShortCut(a__ , stride=a__ , name='''shortcut''')
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''')
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_lowerCamelCase : List[Any] = [
TFRegNetConvLayer(a__ , kernel_size=1 , activation=config.hidden_act , name='''layer.0'''),
TFRegNetConvLayer(
a__ , stride=a__ , groups=a__ , activation=config.hidden_act , name='''layer.1'''),
TFRegNetConvLayer(a__ , kernel_size=1 , activation=a__ , name='''layer.2'''),
]
_lowerCamelCase : Dict = ACTaFN[config.hidden_act]
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = hidden_state
for layer_module in self.layers:
_lowerCamelCase : Optional[Any] = layer_module(a__)
_lowerCamelCase : int = self.shortcut(a__)
hidden_state += residual
_lowerCamelCase : Union[str, Any] = self.activation(a__)
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , a__ , a__ , a__ , a__ = 1 , **a__):
"""simple docstring"""
super().__init__(**a__)
_lowerCamelCase : Dict = in_channels != out_channels or stride != 1
_lowerCamelCase : str = max(1 , out_channels // config.groups_width)
_lowerCamelCase : List[str] = (
TFRegNetShortCut(a__ , stride=a__ , name='''shortcut''')
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''')
)
_lowerCamelCase : List[Any] = [
TFRegNetConvLayer(a__ , kernel_size=1 , activation=config.hidden_act , name='''layer.0'''),
TFRegNetConvLayer(
a__ , stride=a__ , groups=a__ , activation=config.hidden_act , name='''layer.1'''),
TFRegNetSELayer(a__ , reduced_channels=int(round(in_channels / 4)) , name='''layer.2'''),
TFRegNetConvLayer(a__ , kernel_size=1 , activation=a__ , name='''layer.3'''),
]
_lowerCamelCase : int = ACTaFN[config.hidden_act]
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : List[str] = hidden_state
for layer_module in self.layers:
_lowerCamelCase : str = layer_module(a__)
_lowerCamelCase : str = self.shortcut(a__)
hidden_state += residual
_lowerCamelCase : Optional[Any] = self.activation(a__)
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , a__ , a__ , a__ , a__ = 2 , a__ = 2 , **a__):
"""simple docstring"""
super().__init__(**a__)
_lowerCamelCase : int = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
_lowerCamelCase : List[str] = [
# downsampling is done in the first layer with stride of 2
layer(a__ , a__ , a__ , stride=a__ , name='''layers.0'''),
*[layer(a__ , a__ , a__ , name=F"""layers.{i+1}""") for i in range(depth - 1)],
]
def __snake_case ( self , a__):
"""simple docstring"""
for layer_module in self.layers:
_lowerCamelCase : Optional[Any] = layer_module(a__)
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , a__ , **a__):
"""simple docstring"""
super().__init__(**a__)
_lowerCamelCase : Optional[Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
a__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ))
        # NOTE: names below reconstructed from the upstream TF RegNet code.
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(
                TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))
def __snake_case ( self , a__ , a__ = False , a__ = True):
"""simple docstring"""
_lowerCamelCase : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowerCamelCase : str = hidden_states + (hidden_state,)
_lowerCamelCase : str = stage_module(a__)
if output_hidden_states:
_lowerCamelCase : int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return TFBaseModelOutputWithNoAttention(last_hidden_state=a__ , hidden_states=a__)
@keras_serializable
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
UpperCAmelCase__ = RegNetConfig
def __init__( self , a__ , **a__):
"""simple docstring"""
super().__init__(**a__)
_lowerCamelCase : int = config
_lowerCamelCase : Tuple = TFRegNetEmbeddings(a__ , name='''embedder''')
_lowerCamelCase : str = TFRegNetEncoder(a__ , name='''encoder''')
_lowerCamelCase : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a__ , name='''pooler''')
@unpack_inputs
def __snake_case ( self , a__ , a__ = None , a__ = None , a__ = False , ):
"""simple docstring"""
_lowerCamelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCamelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCamelCase : Optional[int] = self.embedder(a__ , training=a__)
_lowerCamelCase : Optional[int] = self.encoder(
a__ , output_hidden_states=a__ , return_dict=a__ , training=a__)
_lowerCamelCase : Dict = encoder_outputs[0]
_lowerCamelCase : Union[str, Any] = self.pooler(a__)
        # Change to NCHW output format to have uniformity in the modules
_lowerCamelCase : List[str] = tf.transpose(a__ , perm=(0, 3, 1, 2))
_lowerCamelCase : Optional[int] = tf.transpose(a__ , perm=(0, 3, 1, 2))
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_lowerCamelCase : Tuple = tuple([tf.transpose(a__ , perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a__ , pooler_output=a__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class __A ( lowerCamelCase__ ):
"""simple docstring"""
UpperCAmelCase__ = RegNetConfig
UpperCAmelCase__ = """regnet"""
UpperCAmelCase__ = """pixel_values"""
@property
def __snake_case ( self):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa)}
_lowerCamelCase = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
_lowerCamelCase = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" ,lowerCamelCase__ ,)
class __A ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self , a__ , *a__ , **a__):
"""simple docstring"""
super().__init__(a__ , *a__ , **a__)
_lowerCamelCase : str = TFRegNetMainLayer(a__ , name='''regnet''')
@unpack_inputs
@add_start_docstrings_to_model_forward(a__)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __snake_case ( self , a__ , a__ = None , a__ = None , a__=False , ):
"""simple docstring"""
_lowerCamelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCamelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCamelCase : Tuple = self.regnet(
pixel_values=a__ , output_hidden_states=a__ , return_dict=a__ , training=a__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" ,lowerCamelCase__ ,)
class __A ( lowerCamelCase__ ,lowerCamelCase__ ):
"""simple docstring"""
def __init__( self , a__ , *a__ , **a__):
"""simple docstring"""
super().__init__(a__ , *a__ , **a__)
_lowerCamelCase : Dict = config.num_labels
_lowerCamelCase : Union[str, Any] = TFRegNetMainLayer(a__ , name='''regnet''')
# classification head
_lowerCamelCase : int = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''') if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(a__)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __snake_case ( self , a__ = None , a__ = None , a__ = None , a__ = None , a__=False , ):
"""simple docstring"""
_lowerCamelCase : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCamelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCamelCase : Union[str, Any] = self.regnet(
a__ , output_hidden_states=a__ , return_dict=a__ , training=a__)
_lowerCamelCase : Any = outputs.pooler_output if return_dict else outputs[1]
_lowerCamelCase : Union[str, Any] = self.classifier[0](a__)
_lowerCamelCase : str = self.classifier[1](a__)
_lowerCamelCase : List[Any] = None if labels is None else self.hf_compute_loss(labels=a__ , logits=a__)
if not return_dict:
_lowerCamelCase : List[str] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=a__ , logits=a__ , hidden_states=outputs.hidden_states)
| 613 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = '''▁'''
__a = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__a = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
__a = {
'''facebook/xglm-564M''': 20_48,
}
class __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
A : List[Any] = VOCAB_FILES_NAMES
A : Tuple = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
lowercase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase : Optional[int] = 7
lowercase : Dict = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
lowercase : Tuple = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
lowercase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase__ ) )
lowercase : Tuple = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase : Optional[Any] = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
lowercase : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase : Tuple = len(self.sp_model )
lowercase : Optional[int] = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(UpperCamelCase__ )
lowercase : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
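        # Added illustration (values follow from the alignment table above,
        # with fairseq_offset == 1): sentencepiece id 3 (",") maps to fairseq
        # id 4, while ids 0-3 are served from `fairseq_tokens_to_ids`:
        #
        #   _convert_token_to_id("<s>")  ->  0                            (table lookup)
        #   _convert_token_to_id(",")    ->  sp_model.PieceToId(",") + 1  == 4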
def __getstate__( self ):
lowercase : Tuple = self.__dict__.copy()
lowercase : str = None
lowercase : List[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , SCREAMING_SNAKE_CASE__ ):
lowercase : str = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : Union[str, Any] = {}
lowercase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase : int = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ ))
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ ))
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : List[Any] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __lowerCamelCase ( self ):
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __lowerCamelCase ( self ):
lowercase : Any = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase : Dict = self.sp_model.PieceToId(UpperCamelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = ''''''.join(UpperCamelCase__ ).replace(UpperCamelCase__ , ''' ''' ).strip()
return out_string
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : List[Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , '''wb''' ) as fi:
lowercase : str = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
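# Added usage sketch: the class above corresponds to transformers'
# XGLMTokenizer (identifier names here are obfuscated). Assuming the
# checkpoint from the vocab map is reachable, a round trip looks like:
#
#   tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   ids = tokenizer("Hello world").input_ids
#   text = tokenizer.decode(ids, skip_special_tokens=True)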
| 319 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __snake_case :
"""simple docstring"""
def __init__( self :str , UpperCamelCase__ :Optional[int] , UpperCamelCase__ :Optional[int]=13 , UpperCamelCase__ :Optional[Any]=7 , UpperCamelCase__ :Dict=True , UpperCamelCase__ :Union[str, Any]=True , UpperCamelCase__ :Any=True , UpperCamelCase__ :Tuple=True , UpperCamelCase__ :Dict=99 , UpperCamelCase__ :Union[str, Any]=32 , UpperCamelCase__ :Dict=2 , UpperCamelCase__ :List[str]=4 , UpperCamelCase__ :Any=37 , UpperCamelCase__ :int="gelu" , UpperCamelCase__ :str=0.1 , UpperCamelCase__ :Union[str, Any]=0.1 , UpperCamelCase__ :Optional[Any]=512 , UpperCamelCase__ :Optional[Any]=16 , UpperCamelCase__ :Optional[Any]=2 , UpperCamelCase__ :Optional[Any]=0.02 , UpperCamelCase__ :List[Any]=False , UpperCamelCase__ :Union[str, Any]=True , UpperCamelCase__ :Optional[int]="None" , UpperCamelCase__ :Any=3 , UpperCamelCase__ :Optional[int]=4 , UpperCamelCase__ :List[str]=None , ):
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = relative_attention
_a = position_biased_input
_a = pos_att_type
_a = scope
def SCREAMING_SNAKE_CASE_ ( self :Any ):
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=UpperCamelCase__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self :Dict , UpperCamelCase__ :int , UpperCamelCase__ :int , UpperCamelCase__ :Tuple , UpperCamelCase__ :List[str] , UpperCamelCase__ :Tuple , UpperCamelCase__ :Optional[Any] , UpperCamelCase__ :int ):
_a = TFDebertaVaModel(config=UpperCamelCase__ )
_a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_a = [input_ids, input_mask]
_a = model(UpperCamelCase__ )
_a = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self :Tuple , UpperCamelCase__ :int , UpperCamelCase__ :str , UpperCamelCase__ :str , UpperCamelCase__ :Optional[Any] , UpperCamelCase__ :str , UpperCamelCase__ :Optional[int] , UpperCamelCase__ :List[str] ):
_a = TFDebertaVaForMaskedLM(config=UpperCamelCase__ )
_a = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_a = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self :Any , UpperCamelCase__ :str , UpperCamelCase__ :Tuple , UpperCamelCase__ :List[str] , UpperCamelCase__ :str , UpperCamelCase__ :Dict , UpperCamelCase__ :Tuple , UpperCamelCase__ :Any ):
_a = self.num_labels
_a = TFDebertaVaForSequenceClassification(config=UpperCamelCase__ )
_a = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_a = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self :List[str] , UpperCamelCase__ :Union[str, Any] , UpperCamelCase__ :int , UpperCamelCase__ :Optional[int] , UpperCamelCase__ :int , UpperCamelCase__ :Any , UpperCamelCase__ :int , UpperCamelCase__ :Optional[Any] ):
_a = self.num_labels
_a = TFDebertaVaForTokenClassification(config=UpperCamelCase__ )
_a = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_a = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , UpperCamelCase__ :Dict , UpperCamelCase__ :Dict , UpperCamelCase__ :str , UpperCamelCase__ :Optional[int] , UpperCamelCase__ :Tuple , UpperCamelCase__ :List[str] , UpperCamelCase__ :Any ):
_a = TFDebertaVaForQuestionAnswering(config=UpperCamelCase__ )
_a = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_a = model(UpperCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
_a = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class __snake_case ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ : Optional[Any] = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : str = (
{
'feature-extraction': TFDebertaVaModel,
'fill-mask': TFDebertaVaForMaskedLM,
'question-answering': TFDebertaVaForQuestionAnswering,
'text-classification': TFDebertaVaForSequenceClassification,
'token-classification': TFDebertaVaForTokenClassification,
'zero-shot': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : Any = False
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
_a = TFDebertaVaModelTester(self )
_a = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self :str ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :Any ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
_a = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
self.assertIsNotNone(UpperCamelCase__ )
@require_tf
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="Model not available yet" )
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self :str ):
_a = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
_a = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_a = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_a = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
_a = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , UpperCamelCase__ , atol=1E-4 )
| 388 | 0 |
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int):
    """Flip two qubits with X gates and return the measured counts."""
    # Use Aer's simulator backend
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"""Total count for various states are: {counts}""")
| 444 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
a_ : int = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[Any] = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
a_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 444 | 1 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _lowerCamelCase ( _lowercase ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] ="detr"
UpperCAmelCase_ : Tuple =["past_key_values"]
UpperCAmelCase_ : Optional[Any] ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=3 , UpperCAmelCase=100 , UpperCAmelCase=6 , UpperCAmelCase=2048 , UpperCAmelCase=8 , UpperCAmelCase=6 , UpperCAmelCase=2048 , UpperCAmelCase=8 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=True , UpperCAmelCase="relu" , UpperCAmelCase=256 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.02 , UpperCAmelCase=1.0 , UpperCAmelCase=False , UpperCAmelCase="sine" , UpperCAmelCase="resnet50" , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=1 , UpperCAmelCase=5 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase=1 , UpperCAmelCase=5 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , **UpperCAmelCase , ) -> int:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
__snake_case : Union[str, Any] = use_timm_backbone
__snake_case : Dict = backbone_config
__snake_case : Any = num_channels
__snake_case : List[str] = num_queries
__snake_case : Any = d_model
__snake_case : Union[str, Any] = encoder_ffn_dim
__snake_case : Dict = encoder_layers
__snake_case : List[str] = encoder_attention_heads
__snake_case : str = decoder_ffn_dim
__snake_case : Any = decoder_layers
__snake_case : Optional[Any] = decoder_attention_heads
__snake_case : int = dropout
__snake_case : int = attention_dropout
__snake_case : int = activation_dropout
__snake_case : int = activation_function
__snake_case : Dict = init_std
__snake_case : List[str] = init_xavier_std
__snake_case : Union[str, Any] = encoder_layerdrop
__snake_case : List[Any] = decoder_layerdrop
__snake_case : List[str] = encoder_layers
__snake_case : List[Any] = auxiliary_loss
__snake_case : Optional[Any] = position_embedding_type
__snake_case : Optional[int] = backbone
__snake_case : Dict = use_pretrained_backbone
__snake_case : str = dilation
# Hungarian matcher
__snake_case : Tuple = class_cost
__snake_case : Union[str, Any] = bbox_cost
__snake_case : Optional[int] = giou_cost
# Loss coefficients
__snake_case : Tuple = mask_loss_coefficient
__snake_case : List[Any] = dice_loss_coefficient
__snake_case : Optional[int] = bbox_loss_coefficient
__snake_case : List[Any] = giou_loss_coefficient
__snake_case : Dict = eos_coefficient
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return self.d_model
@classmethod
def UpperCAmelCase ( cls , UpperCAmelCase , **UpperCAmelCase ) -> Dict:
'''simple docstring'''
return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase )
def UpperCAmelCase ( self ) -> Dict[str, any]:
'''simple docstring'''
__snake_case : Optional[Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__snake_case : Optional[Any] = self.backbone_config.to_dict()
__snake_case : Union[str, Any] = self.__class__.model_type
return output
class _lowerCamelCase ( _lowercase ):
"""simple docstring"""
UpperCAmelCase_ : int =version.parse("1.11" )
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def UpperCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return 12
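# Added usage sketch: the classes above correspond to transformers' DetrConfig
# and DetrOnnxConfig (identifier names here are obfuscated). The attribute map
# defined on the config makes the following equivalences hold:
#
#   config = DetrConfig(use_timm_backbone=True, num_queries=50)
#   assert config.num_attention_heads == config.encoder_attention_heads
#   assert config.hidden_size == config.d_model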
| 243 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[int] = (DPMSolverSDEScheduler,)
lowerCamelCase__: int = 10
def _lowerCamelCase ( self: Any , **__lowerCamelCase: Optional[int] ) -> Optional[int]:
__UpperCAmelCase : Optional[Any] = {
"num_train_timesteps": 11_00,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"noise_sampler_seed": 0,
}
config.update(**__lowerCamelCase )
return config
def _lowerCamelCase ( self: List[str] ) -> Dict:
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__lowerCamelCase )
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def _lowerCamelCase ( self: Tuple ) -> List[str]:
__UpperCAmelCase : List[str] = self.scheduler_classes[0]
__UpperCAmelCase : List[str] = self.get_scheduler_config()
__UpperCAmelCase : List[str] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
__UpperCAmelCase : str = self.dummy_model()
__UpperCAmelCase : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
__UpperCAmelCase : List[Any] = sample.to(__lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
__UpperCAmelCase : List[Any] = scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Dict = model(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : int = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Tuple = output.prev_sample
__UpperCAmelCase : Dict = torch.sum(torch.abs(__lowerCamelCase ) )
__UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(__lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1e-2
assert abs(result_mean.item() - 0.21_78_70_59_64_56_52_77 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1e-2
assert abs(result_mean.item() - 0.2_23_42_90_68_92_29_96_52 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1e-3
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Dict = self.scheduler_classes[0]
__UpperCAmelCase : int = self.get_scheduler_config(prediction_type="v_prediction" )
__UpperCAmelCase : List[Any] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
__UpperCAmelCase : List[str] = self.dummy_model()
__UpperCAmelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
__UpperCAmelCase : Optional[int] = sample.to(__lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
__UpperCAmelCase : Optional[int] = scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[str] = model(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Any = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[Any] = output.prev_sample
__UpperCAmelCase : Dict = torch.sum(torch.abs(__lowerCamelCase ) )
__UpperCAmelCase : Tuple = torch.mean(torch.abs(__lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1e-2
assert abs(result_mean.item() - 0.1_62_26_28_90_14_81_62_84 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1e-2
assert abs(result_mean.item() - 0.1_66_88_32_60_01_16_72_97 ) < 1e-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1e-2
assert abs(result_mean.item() - 0.15_60_53_06_62_53_66_21 ) < 1e-3
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : str = self.scheduler_classes[0]
__UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
__UpperCAmelCase : int = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__lowerCamelCase )
__UpperCAmelCase : int = self.dummy_model()
__UpperCAmelCase : List[Any] = self.dummy_sample_deter.to(__lowerCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__UpperCAmelCase : int = scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : str = model(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : str = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Dict = output.prev_sample
__UpperCAmelCase : Tuple = torch.sum(torch.abs(__lowerCamelCase ) )
__UpperCAmelCase : List[Any] = torch.mean(torch.abs(__lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1e-2
assert abs(result_mean.item() - 0.2_18_05_93_46_07_98_26_35 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1e-2
assert abs(result_mean.item() - 0.2_23_42_90_83_82_41_57_71 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1e-3
def _lowerCamelCase ( self: Any ) -> Any:
__UpperCAmelCase : List[Any] = self.scheduler_classes[0]
__UpperCAmelCase : Dict = self.get_scheduler_config()
__UpperCAmelCase : List[Any] = scheduler_class(**__lowerCamelCase , use_karras_sigmas=__lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__lowerCamelCase )
__UpperCAmelCase : List[Any] = self.dummy_model()
__UpperCAmelCase : Any = self.dummy_sample_deter.to(__lowerCamelCase ) * scheduler.init_noise_sigma
__UpperCAmelCase : Dict = sample.to(__lowerCamelCase )
for t in scheduler.timesteps:
__UpperCAmelCase : Dict = scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[str] = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Dict = output.prev_sample
__UpperCAmelCase : Tuple = torch.sum(torch.abs(__lowerCamelCase ) )
__UpperCAmelCase : str = torch.mean(torch.abs(__lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
| 382 | 0 |
from math import log2


def lowest_set_bit(a: int) -> int:
    '''simple docstring'''
    if a < 0:
        raise ValueError('Input value must be a positive integer')
    elif not isinstance(a, int):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (a == 0) else int(log2(a & -a))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
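# Worked example (added): for a = 12 (0b1100), a & -a isolates the lowest set
# bit, 0b100 == 4, and log2(4) == 2, so the lowest set bit sits at index 2:
#
#   >>> lowest_set_bit(12)
#   2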
| 717 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "rwkv"
a__ = {"max_position_embeddings": "context_length"}
def __init__( self , __snake_case=5_02_77 , __snake_case=10_24 , __snake_case=40_96 , __snake_case=32 , __snake_case=None , __snake_case=None , __snake_case=1e-5 , __snake_case=0 , __snake_case=0 , __snake_case=6 , __snake_case=False , __snake_case=True , **__snake_case , ):
_UpperCamelCase : str = vocab_size
_UpperCamelCase : int = context_length
_UpperCamelCase : Tuple = hidden_size
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCamelCase : Union[str, Any] = layer_norm_epsilon
_UpperCamelCase : Dict = rescale_every
_UpperCamelCase : Optional[Any] = use_cache
_UpperCamelCase : str = bos_token_id
_UpperCamelCase : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
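# Added note: when `attention_hidden_size` or `intermediate_size` is left as
# None, the __init__ above falls back to `hidden_size` and `4 * hidden_size`
# respectively (RwkvConfig is the transformers name for the obfuscated class):
#
#   cfg = RwkvConfig(hidden_size=1024)
#   assert cfg.attention_hidden_size == 1024
#   assert cfg.intermediate_size == 4096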
| 648 | 0 |
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f'mod inverse of {a!r} and {m!r} does not exist'
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
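# Worked example (added): gcd(7, 12) == 1 and 7 * 7 == 49 == 1 (mod 12), so
#
#   >>> find_mod_inverse(7, 12)
#   7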
| 392 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ ( self : int ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : int ) -> str:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
lowerCamelCase = 'A painting of a squirrel eating a burger'
lowerCamelCase = jax.device_count()
lowerCamelCase = num_samples * [prompt]
lowerCamelCase = sd_pipe.prepare_inputs(__snake_case )
lowerCamelCase = replicate(__snake_case )
lowerCamelCase = shard(__snake_case )
lowerCamelCase = jax.random.PRNGKey(0 )
lowerCamelCase = jax.random.split(__snake_case , jax.device_count() )
lowerCamelCase = sd_pipe(__snake_case , __snake_case , __snake_case , num_inference_steps=25 , jit=__snake_case )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowerCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase = images[0, 253:256, 253:256, -1]
lowerCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.4_5508, 0.4512] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = 'stabilityai/stable-diffusion-2'
lowerCamelCase , lowerCamelCase = FlaxDPMSolverMultistepScheduler.from_pretrained(__snake_case , subfolder='scheduler' )
lowerCamelCase , lowerCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
__snake_case , scheduler=__snake_case , revision='bf16' , dtype=jnp.bfloataa , )
lowerCamelCase = scheduler_params
lowerCamelCase = 'A painting of a squirrel eating a burger'
lowerCamelCase = jax.device_count()
lowerCamelCase = num_samples * [prompt]
lowerCamelCase = sd_pipe.prepare_inputs(__snake_case )
lowerCamelCase = replicate(__snake_case )
lowerCamelCase = shard(__snake_case )
lowerCamelCase = jax.random.PRNGKey(0 )
lowerCamelCase = jax.random.split(__snake_case , jax.device_count() )
lowerCamelCase = sd_pipe(__snake_case , __snake_case , __snake_case , num_inference_steps=25 , jit=__snake_case )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowerCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase = images[0, 253:256, 253:256, -1]
lowerCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase = jnp.array([0.4336, 0.4_2969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 246 | 0 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowerCAmelCase : Tuple = datasets.logging.get_logger(__name__)
lowerCAmelCase : Dict = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
lowerCAmelCase : List[Any] = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
lowerCAmelCase : List[str] = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos( key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    """simple docstring"""
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        # the system side must be annotated with the *system* document lines
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , sys_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            'Number of removed nested coreferring mentions in the key '
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
        logger.info(
            'Number of resulting singleton clusters in the key '
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            'files, respectively' )
    return doc_coref_infos
def evaluate( key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    """simple docstring"""
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , fa = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa} )
        logger.info(
            name.ljust(1_0 ) , f"Recall: {recall * 1_0_0:.2f}" , f" Precision: {precision * 1_0_0:.2f}" , f" F1: {fa * 1_0_0:.2f}" , )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 1_0_0
        logger.info(f"CoNLL score: {conll:.2f}" )
        output_scores.update({'conll_score': conll} )
    return output_scores
def check_gold_parse_annotation( key_lines ):
    """simple docstring"""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _A ( datasets.Metric):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
    def _compute( self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
        """simple docstring"""
        metrics = [
            ('mentions', evaluator.mentions),
            ('muc', evaluator.muc),
            ('bcub', evaluator.b_cubed),
            ('ceafe', evaluator.ceafe),
            ('lea', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
return score
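# Worked sketch of the averaged CoNLL score computed above: it is the plain mean of
# the MUC, B-cubed and CEAFe F1 values, scaled to a percentage. With assumed F1
# values of 0.90, 0.85 and 0.80:
#   conll_score = (0.90 + 0.85 + 0.80) / 3 * 100 = 85.0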
| 353 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig :
    def __init__( self , load_in_8bit=False , load_in_4bit=False , llm_int8_threshold=6.0 , llm_int8_skip_modules=None , llm_int8_enable_fp32_cpu_offload=False , llm_int8_has_fp16_weight=False , bnb_4bit_compute_dtype=None , bnb_4bit_quant_type="fp4" , bnb_4bit_use_double_quant=False , **kwargs , ):
        """simple docstring"""
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype , str ):
            self.bnb_4bit_compute_dtype = getattr(torch , bnb_4bit_compute_dtype )
        elif isinstance(bnb_4bit_compute_dtype , torch.dtype ):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
        self.post_init()
    def post_init( self ):
"""simple docstring"""
        if not isinstance(self.llm_int8_threshold , float ):
            raise ValueError('llm_int8_threshold must be a float' )
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules , list ):
            raise ValueError('llm_int8_skip_modules must be a list of strings' )
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload , bool ):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
        if not isinstance(self.llm_int8_has_fp16_weight , bool ):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype , torch.dtype ):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
        if not isinstance(self.bnb_4bit_quant_type , str ):
            raise ValueError('bnb_4bit_quant_type must be a string' )
        if not isinstance(self.bnb_4bit_use_double_quant , bool ):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
            '0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
    def is_quantizable( self ):
"""simple docstring"""
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method( self ):
"""simple docstring"""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
else:
return None
@classmethod
    def from_dict( cls , config_dict , return_unused_kwargs , **kwargs ):
        """simple docstring"""
        config = cls(**config_dict )
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config , key ):
                setattr(config , key , value )
                to_remove.append(key )
        for key in to_remove:
            kwargs.pop(key , None )
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file( self , json_file_path ):
        """simple docstring"""
        with open(json_file_path , 'w' , encoding='utf-8' ) as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict , indent=2 , sort_keys=True ) + '\n'
            writer.write(json_string )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['bnb_4bit_compute_dtype'] = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
        return output
def __repr__( self ):
"""simple docstring"""
return f"{self.__class__.__name__} {self.to_json_string()}"
    def to_json_string( self , use_diff = True ):
        """simple docstring"""
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict , indent=2 , sort_keys=True ) + "\n"
    def to_diff_dict( self ):
        """simple docstring"""
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
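# A short usage sketch, assuming bitsandbytes>=0.39.0 is installed; the values are
# illustrative, not recommendations.
#   quantization_config = BitsAndBytesConfig(
#       load_in_4bit=True,
#       bnb_4bit_quant_type="nf4",
#       bnb_4bit_compute_dtype="bfloat16",
#   )
#   print(quantization_config.to_json_string(use_diff=True))  # only non-default fields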
| 353 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class _UpperCAmelCase ( PipelineTool):
    default_checkpoint = """microsoft/speecht5_tts"""
    description = (
        """This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
        """text to read (in English) and returns a waveform object containing the sound."""
    )
    name = """text_reader"""
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan
    inputs = ["""text"""]
    outputs = ["""audio"""]
    def setup( self ) -> int:
        '''simple docstring'''
        if self.post_processor is None:
            self.post_processor = """microsoft/speecht5_hifigan"""
        super().setup()
    def encode( self , text , speaker_embeddings=None ) -> Union[str, Any]:
        '''simple docstring'''
        inputs = self.pre_processor(text=text , return_tensors="""pt""" , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
            embeddings_dataset = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
            speaker_embeddings = torch.tensor(embeddings_dataset[73_05]["""xvector"""] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward( self , inputs ) -> Dict:
        '''simple docstring'''
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode( self , outputs ) -> Tuple:
        '''simple docstring'''
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
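# A minimal usage sketch; instantiating the tool downloads the SpeechT5 checkpoints,
# and the speaker-embedding dataset above is fetched when no embedding is passed,
# so this assumes network access.
#   tool = _UpperCAmelCase()
#   speech = tool("Hello, this text will be read out loud.")  # waveform tensor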
| 238 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class _UpperCAmelCase ( ProcessorMixin):
    feature_extractor_class = """SpeechT5FeatureExtractor"""
    tokenizer_class = """SpeechT5Tokenizer"""
    def __init__( self , feature_extractor , tokenizer ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__(feature_extractor , tokenizer )
    def __call__( self , *args , **kwargs ) -> Dict:
        '''simple docstring'''
        audio = kwargs.pop("""audio""" , None )
        text = kwargs.pop("""text""" , None )
        text_target = kwargs.pop("""text_target""" , None )
        audio_target = kwargs.pop("""audio_target""" , None )
        sampling_rate = kwargs.pop("""sampling_rate""" , None )
        if audio is not None and text is not None:
            raise ValueError(
                """Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                """Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                """You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        elif text is not None:
            inputs = self.tokenizer(text , **kwargs )
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target , *args , sampling_rate=sampling_rate , **kwargs )
            labels = targets["""input_values"""]
        elif text_target is not None:
            targets = self.tokenizer(text_target , **kwargs )
            labels = targets["""input_ids"""]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["""labels"""] = labels
            decoder_attention_mask = targets.get("""attention_mask""" )
            if decoder_attention_mask is not None:
                inputs["""decoder_attention_mask"""] = decoder_attention_mask
        return inputs
    def pad( self , *args , **kwargs ) -> Optional[int]:
        '''simple docstring'''
        input_values = kwargs.pop("""input_values""" , None )
        input_ids = kwargs.pop("""input_ids""" , None )
        labels = kwargs.pop("""labels""" , None )
        if input_values is not None and input_ids is not None:
            raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" )
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                """You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" )
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values , *args , **kwargs )
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids , **kwargs )
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels , list ) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels , **kwargs )
                labels = targets["""input_ids"""]
            else:
                # temporarily swap the feature size so the pad targets mel spectrograms
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels , *args , **kwargs )
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["""input_values"""]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["""labels"""] = labels
            decoder_attention_mask = targets.get("""attention_mask""" )
            if decoder_attention_mask is not None:
                inputs["""decoder_attention_mask"""] = decoder_attention_mask
        return inputs
    def batch_decode( self , *args , **kwargs ) -> List[Any]:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
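# A minimal usage sketch, assuming the pretrained "microsoft/speecht5_tts" processor
# is downloadable; the audio array below is a dummy placeholder.
#   processor = _UpperCAmelCase.from_pretrained("microsoft/speecht5_tts")
#   text_inputs = processor(text="Hello world", return_tensors="pt")
#   dummy_audio = [0.0] * 16_000  # one second of silence at 16 kHz
#   tts_inputs = processor(audio=dummy_audio, sampling_rate=16_000, text_target="hello", return_tensors="pt")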
| 238 | 1 |
"""simple docstring"""
from torch import nn
def lowercase ( act_fn ) -> nn.Module:
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' )
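# Quick sanity check of the mapping above; runs only when executed directly.
if __name__ == "__main__":
    assert isinstance(lowercase("""silu""" ) , nn.SiLU )
    assert isinstance(lowercase("""gelu""" ) , nn.GELU )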
| 190 |
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters( with_config=True ) -> List[Any]:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class _lowercase ( TestCase ):
    dataset = None
    config_name = None
    def test_dataset_info_available( self , dataset , config_name ):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = '''/'''.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , '''/''' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs( tmp_path_factory ):
    tmp_dir = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
    dataset_module = dataset_module_factory('''wikipedia''' , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir , config_name='''20220301.frr''' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs( tmp_path ):
    dataset_module = dataset_module_factory('''wikipedia''' , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance = builder_cls(
        cache_dir=tmp_path , config_name='''20220301.frr''' , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds['''train'''] , IterableDataset )
    assert next(iter(ds['''train'''] ) )
| 190 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """simple docstring"""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            # `prime` is composite: slide its smallest prime factor forward
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` really is prime: its square is the first composite it marks
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution( limit = 1E10 ) -> int:
    """simple docstring"""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes )
        n += 2
if __name__ == "__main__":
print(solution())
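    # Sanity sketch: the first few primes from the incremental sieve above.
    from itertools import islice
    print(list(islice(sieve() , 10 ) ))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]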
| 657 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester :
def __init__( self , a_ , a_=2 , a_=3 , a_=4 , a_=2 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=36 , a_=3 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=6 , a_=6 , a_=3 , a_=4 , a_=None , a_=1000 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase = text_seq_length
_UpperCAmelCase = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> Dict:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple:
_UpperCAmelCase = LayoutLMvaModel(config=a_ )
model.to(a_ )
model.eval()
# text + image
_UpperCAmelCase = model(a_ , pixel_values=a_ )
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ , token_type_ids=a_ )
_UpperCAmelCase = model(a_ , bbox=a_ , pixel_values=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase = model(pixel_values=a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LayoutLMvaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict:
_UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase = model(
a_ , bbox=a_ , pixel_values=a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class LayoutLMvaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase_ : Any = False
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self ) -> Union[str, Any]:
        self.model_tester = LayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
def _a ( self , a_ , a_ , a_=False ) -> List[str]:
_UpperCAmelCase = copy.deepcopy(a_ )
if model_class in get_values(a_ ):
_UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a_ ):
_UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in get_values(a_ ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
elif model_class in [
*get_values(a_ ),
]:
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a_ , )
return inputs_dict
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def _a ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def _a ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def _a ( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def _a ( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
def _a ( self ) -> List[str]:
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def __lowerCamelCase ( ):
"""simple docstring"""
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class LayoutLMvaModelIntegrationTest ( unittest.TestCase ):
@cached_property
def _a ( self ) -> List[Any]:
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
def _a ( self ) -> Union[str, Any]:
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values.to(torch_device )
        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
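# A minimal inference sketch mirroring the integration test above; it assumes the
# public "microsoft/layoutlmv3-base" checkpoint is downloadable.
#   model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
#   image_processor = LayoutLMvaImageProcessor(apply_ocr=False)
#   pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
#   outputs = model(input_ids=torch.tensor([[1, 2]]), bbox=torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]]), pixel_values=pixel_values)
#   print(outputs.last_hidden_state.shape)  # torch.Size([1, 199, 768])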
| 657 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def _lowerCAmelCase ( postfix_notation )-> int:
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b , a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                # floor division that rounds toward zero for mixed-sign operands
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
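    # Worked example: "2 1 + 3 *" evaluates (2 + 1) * 3 = 9.
    print(_lowerCAmelCase(["2", "1", "+", "3", "*"] ))  # 9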
| 721 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_whisper_fast"""] = ["""WhisperTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_whisper"""] = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_whisper"""] = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_whisper"""] = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
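# Usage sketch of the lazy module pattern above: submodules are only imported when an
# attribute is first accessed, so the top-level package import stays cheap. Assuming
# this file is the `transformers.models.whisper` package __init__:
#   from transformers import WhisperForConditionalGeneration  # triggers the real import
#   model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")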
| 617 | 0 |
from math import pi
def UpperCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ ) -> float:
'''simple docstring'''
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
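    # Worked check: a 90-degree arc of a circle with radius 10 spans a quarter of the
    # circumference, i.e. 2 * pi * 10 * (90 / 360) = 5 * pi ~= 15.7079.
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-12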
| 322 |
from __future__ import annotations
from typing import Any
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase : int = 6 ):
"""simple docstring"""
        self.front: Node | None = None
        self.rear: Node | None = None
self.create_linked_list(UpperCamelCase )
    def create_linked_list( self , initial_capacity : int ):
        """simple docstring"""
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , initial_capacity ):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring: last node points back to the front
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty( self ) -> bool:
"""simple docstring"""
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
    def first( self ):
"""simple docstring"""
self.check_can_perform_operation()
return self.front.data if self.front else None
    def enqueue( self , data : Any ):
        """simple docstring"""
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue( self ):
        """simple docstring"""
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation( self ) -> None:
"""simple docstring"""
if self.is_empty():
raise Exception('''Empty Queue''' )
    def check_is_full( self ) -> None:
"""simple docstring"""
if self.rear and self.rear.next == self.front:
raise Exception('''Full Queue''' )
class Node :
    '''simple docstring'''
    def __init__( self ):
        """simple docstring"""
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
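    # Small usage sketch of the fixed-capacity circular queue defined above.
    queue = UpperCAmelCase__(3 )
    queue.enqueue("""a""" )
    queue.enqueue("""b""" )
    assert queue.first() == """a"""
    assert queue.dequeue() == """a"""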
| 322 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('T')
class SegmentTree ( Generic[T] ):
    def __init__(self , arr : list[T] , fnc : Callable[[T, T], T] ):
        """simple docstring"""
        any_type: Any | None = None
        self.N: int = len(arr )
        self.st: list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()
    def build(self ):
        """simple docstring"""
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update(self , p : int , v : T ):
        """simple docstring"""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query(self , l : int , r : int ):  # noqa: E741
        """simple docstring"""
        l , r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l , r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        for i in range(len(test_array ) ):
            for j in range(i , len(test_array ) ):
                min_range = reduce(min , test_array[i : j + 1] )
                max_range = reduce(max , test_array[i : j + 1] )
                sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(i , j )
                assert max_range == max_segment_tree.query(i , j )
                assert sum_range == sum_segment_tree.query(i , j )
    test_all_segments()
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
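    # One-off range query sketch on the updated array: the non-recursive tree
    # answers any [l, r] query in O(log N) by climbing from the leaves.
    assert min_segment_tree.query(0 , len(test_array ) - 1 ) == min(test_array )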
| 388 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester ( unittest.TestCase ):
    mod_file = inspect.getfile(accelerate.test_utils )
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
    base_cmd = ['accelerate', 'launch']
    config_folder = Path.home() / '.cache/huggingface/accelerate'
    config_file = 'default_config.yaml'
    config_path = config_folder / config_file
    changed_path = config_folder / '_default_config.yaml'
    test_config_path = Path('tests/test_configs' )
@classmethod
    def setUpClass (cls ):
"""simple docstring"""
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
    def tearDownClass (cls ):
"""simple docstring"""
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def a (self : List[str] ):
"""simple docstring"""
        cmd = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def a (self : str ):
"""simple docstring"""
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=a__ ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(a__ ), self.test_file_path] , env=os.environ.copy() )
def a (self : int ):
"""simple docstring"""
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class TpuConfigTester ( unittest.TestCase ):
    tpu_name = 'test-tpu'
    tpu_zone = 'us-central1-a'
    command = 'ls'
    cmd = ['accelerate', 'tpu-config']
    base_output = 'cd /usr/share'
    command_file = 'tests/test_samples/test_command_file.sh'
    gcloud = 'Running gcloud compute tpus tpu-vm ssh'
def a (self : Any ):
"""simple docstring"""
__snake_case = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=a__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a__ , )
def a (self : str ):
"""simple docstring"""
__snake_case = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=a__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a__ , )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=a__ )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a__ , )
def a (self : str ):
"""simple docstring"""
__snake_case = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=a__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a__ , )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=a__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , a__ , )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=a__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a__ , )
def a (self : Any ):
"""simple docstring"""
__snake_case = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=a__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a__ , )
def a (self : Any ):
"""simple docstring"""
__snake_case = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=a__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a__ , )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=a__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a__ , )
| 388 | 1 |
'''simple docstring'''
import json
import sys
def format_json_to_md( input_json_file , output_md_file ) -> None:
    '''simple docstring'''
    with open(input_json_file , encoding="utf-8" ) as f:
        results = json.load(f )
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/" )[-1]
        output_md.append(f"""### Benchmark: {benchmark_file_name}""" )
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old" , None )
            dif_val = metric_vals.get("diff" , None )
            val_str = f""" {new_val:f}""" if isinstance(new_val , (int, float) ) else "None"
            if old_val is not None:
                val_str += f""" / {old_val:f}""" if isinstance(old_val , (int, float) ) else "None"
            if dif_val is not None:
                val_str += f""" ({dif_val:f})""" if isinstance(dif_val , (int, float) ) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>" )
    with open(output_md_file , "w" , encoding="utf-8" ) as f:
        f.writelines("\n".join(output_md ) )
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
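# Illustrative input/output sketch (hypothetical file contents): given
#   {"benchmarks/infer": {"latency_ms": {"new": 1.5, "old": 2.0, "diff": -0.5}}}
# the script emits a collapsible markdown section with a one-row table:
#   ### Benchmark: infer
#   | metric | latency_ms |
#   | new / old (diff) |  1.500000 / 2.000000 (-0.500000) |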
| 78 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def try_infer_format_from_ext( path ):
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
return ext
raise Exception(
F"Unable to determine file format from file extension {path}. "
F"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" )
def run_command_factory( args ):
    '''simple docstring'''
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )
class _UpperCAmelCase ( lowercase ):
    def __init__( self , nlp : Pipeline , reader : PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader
@staticmethod
    def register_subcommand( parser : ArgumentParser):
        run_parser = parser.add_parser("run" , help="Run a pipeline through the CLI")
        run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run")
        run_parser.add_argument("--input" , type=str , help="Path to the file to use for inference")
        run_parser.add_argument("--output" , type=str , help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model" , type=str , help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config" , type=str , help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer" , type=str , help="Name of the tokenizer to use. (default: same as the model name)")
        run_parser.add_argument(
            "--column" , type=str , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
        run_parser.add_argument(
            "--format" , type=str , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
        run_parser.add_argument(
            "--device" , type=int , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
        run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)
    def run( self ):
        nlp , outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output , dict):
                outputs.append(output)
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(F"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
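# Typical invocation sketch (flags match the parser above); the file paths are
# placeholders:
#   transformers-cli run --task sentiment-analysis --input data.csv \
#       --output out.json --format csv --column text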
| 631 | 0 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase__ ( DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , vqvae , unet , scheduler ) -> Any:
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size = 1 , generator = None , eta = 0.0 , num_inference_steps = 5_0 , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        latents = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["""eta"""] = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            latent_model_input = self.scheduler.scale_model_input(latents , t )
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
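# A minimal generation sketch, assuming a compatible pretrained unconditional
# latent-diffusion checkpoint; the model id is illustrative.
#   pipe = lowerCamelCase__.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("sample.png")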
| 255 |
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp( pattern : str , text : str ) -> bool:
    """simple docstring"""
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False
def test_rabin_karp() -> None:
    """simple docstring"""
    # Test 1)
    pattern = """abc1abc12"""
    text_a = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
    text_b = """alskfjaldsk23adsfabcabc"""
    assert rabin_karp(pattern , text_a ) and not rabin_karp(pattern , text_b )
    # Test 2)
    pattern = """ABABX"""
    text = """ABABZABABYABABX"""
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = """AAAB"""
    text = """ABAAAAAB"""
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = """abcdabcy"""
    text = """abcxabcdabxabcdabcdabcy"""
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = """Lü"""
    text = """Lüsai"""
    assert rabin_karp(pattern , text )
    pattern = """Lue"""
    assert not rabin_karp(pattern , text )
    print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
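    # Extra usage sketch: thanks to the O(1) rolling-hash update above, a search
    # runs in O(len(pattern) + len(text)) expected time.
    assert rabin_karp("""abc""" , """xxabcxx""" )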
| 255 | 1 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase_ (ModelMixin , ConfigMixin , ModuleUtilsMixin ):
"""simple docstring"""
lowerCamelCase : Any = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias']
@register_to_config
    def __init__( self , prefix_length , prefix_inner_dim , prefix_hidden_dim = None , vocab_size = 50257 , n_positions = 1024 , n_embd = 768 , n_layer = 12 , n_head = 12 , n_inner = None , activation_function = "gelu_new" , resid_pdrop = 0.1 , embd_pdrop = 0.1 , attn_pdrop = 0.1 , layer_norm_epsilon = 1E-5 , initializer_range = 0.02 , scale_attn_weights = True , use_cache = True , scale_attn_by_inverse_layer_idx = False , reorder_and_upcast_attn = False , ) -> Tuple:
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'
                f' `n_embd`: {n_embd} are not equal.' )
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim , n_embd ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPTaConfig(
            vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
        self.transformer = GPTaLMHeadModel(gpt_config )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ) -> List[str]:
__lowerCamelCase : Any = self.transformer.transformer.wte(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = self.encode_prefix(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[str] = self.decode_prefix(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
__lowerCamelCase : Union[str, Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
__lowerCamelCase : Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
__lowerCamelCase : Union[str, Any] = self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> torch.Tensor:
return torch.zeros(SCREAMING_SNAKE_CASE_ , self.prefix_length , dtype=torch.intaa , device=SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
return self.encode_prefix(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
__lowerCamelCase : int = torch.split(SCREAMING_SNAKE_CASE_ , 1 , dim=0 )
__lowerCamelCase : List[str] = []
__lowerCamelCase : Tuple = []
for feature in features:
__lowerCamelCase : List[str] = self.decode_prefix(feature.to(SCREAMING_SNAKE_CASE_ ) ) # back to the clip feature
# Only support beam search for now
__lowerCamelCase , __lowerCamelCase : int = self.generate_beam(
input_embeds=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
__lowerCamelCase : Optional[Any] = torch.stack(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = torch.stack(SCREAMING_SNAKE_CASE_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowercase_ ( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = 5 , SCREAMING_SNAKE_CASE_ = 67 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = None , ) -> Union[str, Any]:
__lowerCamelCase : Dict = eos_token_id
__lowerCamelCase : Dict = None
__lowerCamelCase : List[str] = None
__lowerCamelCase : Tuple = torch.ones(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=torch.int )
__lowerCamelCase : Dict = torch.zeros(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=torch.bool )
if input_embeds is not None:
__lowerCamelCase : Any = input_embeds
else:
__lowerCamelCase : Dict = self.transformer.transformer.wte(SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Tuple = self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = outputs.logits
__lowerCamelCase : Tuple = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
__lowerCamelCase : Optional[int] = logits.softmax(-1 ).log()
if scores is None:
__lowerCamelCase , __lowerCamelCase : Dict = logits.topk(SCREAMING_SNAKE_CASE_ , -1 )
__lowerCamelCase : Union[str, Any] = generated.expand(SCREAMING_SNAKE_CASE_ , *generated.shape[1:] )
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
__lowerCamelCase : Optional[int] = next_tokens
else:
__lowerCamelCase : int = tokens.expand(SCREAMING_SNAKE_CASE_ , *tokens.shape[1:] )
__lowerCamelCase : Any = torch.cat((tokens, next_tokens) , dim=1 )
else:
__lowerCamelCase : Optional[int] = -float(np.inf )
__lowerCamelCase : Union[str, Any] = 0
__lowerCamelCase : str = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
__lowerCamelCase : int = scores_sum / seq_lengths[:, None]
__lowerCamelCase , __lowerCamelCase : Optional[int] = scores_sum_average.view(-1 ).topk(SCREAMING_SNAKE_CASE_ , -1 )
__lowerCamelCase : int = next_tokens // scores_sum.shape[1]
__lowerCamelCase : List[str] = seq_lengths[next_tokens_source]
__lowerCamelCase : Optional[int] = next_tokens % scores_sum.shape[1]
__lowerCamelCase : Tuple = next_tokens.unsqueeze(1 )
__lowerCamelCase : List[str] = tokens[next_tokens_source]
__lowerCamelCase : Tuple = torch.cat((tokens, next_tokens) , dim=1 )
__lowerCamelCase : int = generated[next_tokens_source]
__lowerCamelCase : Optional[Any] = scores_sum_average * seq_lengths
__lowerCamelCase : str = is_stopped[next_tokens_source]
__lowerCamelCase : List[Any] = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
__lowerCamelCase : Any = torch.cat((generated, next_token_embed) , dim=1 )
__lowerCamelCase : Dict = is_stopped + next_tokens.eq(SCREAMING_SNAKE_CASE_ ).squeeze()
if is_stopped.all():
break
__lowerCamelCase : Tuple = scores / seq_lengths
__lowerCamelCase : Tuple = scores.argsort(descending=SCREAMING_SNAKE_CASE_ )
# tokens tensors are already padded to max_seq_length
__lowerCamelCase : Union[str, Any] = [tokens[i] for i in order]
__lowerCamelCase : List[Any] = torch.stack(SCREAMING_SNAKE_CASE_ , dim=0 )
__lowerCamelCase : int = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
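
# Editor's note: an illustrative, self-contained sketch (not part of the original
# module) of the per-step bookkeeping that `generate_beam` performs: keep the
# `beam_size` best cumulative log-probabilities and remember which beam each
# survivor extends.
def _beam_step_demo() -> None:
    beam_size = 2
    # cumulative log-probs of the current beams, and toy next-token log-probs
    scores = torch.tensor([-0.1, -0.7])  # (beam,)
    logits = torch.log_softmax(torch.randn(beam_size, 5), dim=-1)  # (beam, vocab)
    scores_sum = scores[:, None] + logits  # (beam, vocab)
    top_scores, flat_idx = scores_sum.view(-1).topk(beam_size)
    beam_source = flat_idx // scores_sum.shape[1]  # which beam each pick extends
    next_tokens = flat_idx % scores_sum.shape[1]  # which token was picked
    assert beam_source.shape == next_tokens.shape == (beam_size,)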
| 13 |
class Graph:
    def __init__(self):
        self.vertex = {}

    # for printing the Graph vertices
    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    # for adding the edge between two vertices
    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
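
# Editor's note: an iterative, stack-based variant (not part of the original
# file) of the same depth-first traversal, useful when recursion depth is a
# concern. It works on the same adjacency-dict representation used by Graph.
def dfs_iterative(graph: dict) -> list:
    visited = set()
    order = []
    for start in graph:
        if start in visited:
            continue
        stack = [start]
        while stack:
            node = stack.pop()
            if node in visited:
                continue
            visited.add(node)
            order.append(node)
            # push neighbours in reverse so they are visited in insertion order
            stack.extend(reversed(graph.get(node, [])))
    return order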
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 31 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
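
# Editor's note: illustrative sketch (not part of the original file). With the
# `_LazyModule` indirection above, names listed in `_import_structure` are only
# materialized on first attribute access, e.g.:
#
#     from transformers import MegaConfig   # triggers loading configuration_mega
#     config = MegaConfig(hidden_size=128)  # hypothetical kwargs, for illustration
#
# Nothing under `modeling_mega` is imported until `MegaModel` (or similar) is
# actually touched, which keeps `import transformers` cheap.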
| 703 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)

        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)

        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")

        AcceleratorState._reset_state()
| 656 | 0 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
'jukebox': 5_1_2,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    """Constructs a Jukebox tokenizer, which turns (artist, genres, lyrics) metadata into token ids."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int]=["v3", "v2", "v2"] , __lowerCamelCase : str=512 , __lowerCamelCase : str=5 , __lowerCamelCase : List[str]="<|endoftext|>" , **__lowerCamelCase : Tuple , ):
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
super().__init__(
unk_token=__lowerCamelCase , n_genres=__lowerCamelCase , version=__lowerCamelCase , max_n_lyric_tokens=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE = version
SCREAMING_SNAKE_CASE = max_n_lyric_tokens
SCREAMING_SNAKE_CASE = n_genres
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
SCREAMING_SNAKE_CASE = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
SCREAMING_SNAKE_CASE = oov.replace(r"\-'" , r"\-+'" )
SCREAMING_SNAKE_CASE = regex.compile(__lowerCamelCase )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.artists_encoder.items()}
SCREAMING_SNAKE_CASE = {v: k for k, v in self.genres_encoder.items()}
SCREAMING_SNAKE_CASE = {v: k for k, v in self.lyrics_encoder.items()}
@property
def _snake_case ( self : str ):
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def _snake_case ( self : Dict ):
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def _snake_case ( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = [self.artists_encoder.get(__lowerCamelCase , 0 ) for artist in list_artists]
for genres in range(len(__lowerCamelCase ) ):
SCREAMING_SNAKE_CASE = [self.genres_encoder.get(__lowerCamelCase , 0 ) for genre in list_genres[genres]]
SCREAMING_SNAKE_CASE = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
SCREAMING_SNAKE_CASE = [[self.lyrics_encoder.get(__lowerCamelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def _snake_case ( self : List[Any] , __lowerCamelCase : Any ):
return list(__lowerCamelCase )
def _snake_case ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , **__lowerCamelCase : Any ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.prepare_for_tokenization(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = self._tokenize(__lowerCamelCase )
return artist, genre, lyrics
def _snake_case ( self : int , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : bool = False ):
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
SCREAMING_SNAKE_CASE = artists[idx].lower()
SCREAMING_SNAKE_CASE = [genres[idx].lower()]
else:
SCREAMING_SNAKE_CASE = self._normalize(artists[idx] ) + ".v2"
SCREAMING_SNAKE_CASE = [
self._normalize(__lowerCamelCase ) + ".v2" for genre in genres[idx].split("_" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
SCREAMING_SNAKE_CASE = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" )
SCREAMING_SNAKE_CASE = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
SCREAMING_SNAKE_CASE = {vocab[index]: index + 1 for index in range(len(__lowerCamelCase ) )}
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) + 1
SCREAMING_SNAKE_CASE = self.vocab
SCREAMING_SNAKE_CASE = {v: k for k, v in self.vocab.items()}
SCREAMING_SNAKE_CASE = ""
else:
SCREAMING_SNAKE_CASE = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+" )
SCREAMING_SNAKE_CASE = self._run_strip_accents(__lowerCamelCase )
SCREAMING_SNAKE_CASE = lyrics.replace("\\" , "\n" )
SCREAMING_SNAKE_CASE = self.out_of_vocab.sub("" , __lowerCamelCase ), [], []
return artists, genres, lyrics
def _snake_case ( self : Any , __lowerCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = unicodedata.normalize("NFD" , __lowerCamelCase )
SCREAMING_SNAKE_CASE = []
for char in text:
SCREAMING_SNAKE_CASE = unicodedata.category(__lowerCamelCase )
if cat == "Mn":
continue
output.append(__lowerCamelCase )
return "".join(__lowerCamelCase )
def _snake_case ( self : Any , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = (
[chr(__lowerCamelCase ) for i in range(ord("a" ) , ord("z" ) + 1 )]
+ [chr(__lowerCamelCase ) for i in range(ord("A" ) , ord("Z" ) + 1 )]
+ [chr(__lowerCamelCase ) for i in range(ord("0" ) , ord("9" ) + 1 )]
+ ["."]
)
SCREAMING_SNAKE_CASE = frozenset(__lowerCamelCase )
SCREAMING_SNAKE_CASE = re.compile(r"_+" )
SCREAMING_SNAKE_CASE = "".join([c if c in accepted else "_" for c in text.lower()] )
SCREAMING_SNAKE_CASE = pattern.sub("_" , __lowerCamelCase ).strip("_" )
return text
def _snake_case ( self : Tuple , __lowerCamelCase : List[str] ):
return " ".join(__lowerCamelCase )
def _snake_case ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : bool = False ):
# Convert to TensorType
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = TensorType(__lowerCamelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." )
import tensorflow as tf
SCREAMING_SNAKE_CASE = tf.constant
SCREAMING_SNAKE_CASE = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed." )
import torch
SCREAMING_SNAKE_CASE = torch.tensor
SCREAMING_SNAKE_CASE = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed." )
import jax.numpy as jnp # noqa: F811
SCREAMING_SNAKE_CASE = jnp.array
SCREAMING_SNAKE_CASE = _is_jax
else:
SCREAMING_SNAKE_CASE = np.asarray
SCREAMING_SNAKE_CASE = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
SCREAMING_SNAKE_CASE = [inputs]
if not is_tensor(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = as_tensor(__lowerCamelCase )
except: # noqa E722
raise ValueError(
"Unable to create tensor, you should probably activate truncation and/or padding "
"with 'padding=True' 'truncation=True' to have batched tensors with the same length." )
return inputs
def __call__( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : str="" , __lowerCamelCase : Union[str, Any]="pt" ):
SCREAMING_SNAKE_CASE = [0, 0, 0]
SCREAMING_SNAKE_CASE = [artist] * len(self.version )
SCREAMING_SNAKE_CASE = [genres] * len(self.version )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.tokenize(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._convert_token_to_id(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = [-INFINITY] * len(full_tokens[-1] )
SCREAMING_SNAKE_CASE = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=__lowerCamelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks} )
def _snake_case ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=__lowerCamelCase ) )
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=__lowerCamelCase ) )
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=__lowerCamelCase ) )
return (artists_file, genres_file, lyrics_file)
def _snake_case ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Any ):
SCREAMING_SNAKE_CASE = self.artists_decoder.get(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [self.genres_decoder.get(__lowerCamelCase ) for genre in genres_index]
SCREAMING_SNAKE_CASE = [self.lyrics_decoder.get(__lowerCamelCase ) for character in lyric_index]
return artist, genres, lyrics
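
# Editor's note: an illustrative usage sketch (not part of the original file).
# The checkpoint id and metadata strings below follow the library's documented
# example for this tokenizer; the tokenizer turns (artist, genres, lyrics)
# metadata into the token lists Jukebox conditions on.
if __name__ == "__main__":
    from transformers import JukeboxTokenizer

    tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
    encoded = tokenizer("Alan Jackson", "Country Rock", "old town road")
    print(encoded["input_ids"])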
| 16 |
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    >>> mean([1, 2, 3, 4, 5, 6, 7, 8])
    4.5
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
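
# Editor's note: an illustrative sketch (not part of the original file) of how
# this (deprecated) dataset class was typically constructed. The checkpoint id
# "bert-base-uncased" and the data_dir path are assumptions for demonstration.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
    print(len(train_dataset), train_dataset.get_labels())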
| 112 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 76 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
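
# Editor's note: illustrative migration sketch (not part of the original file).
# New code should construct the image processor class directly; the checkpoint
# id "vinvino02/glpn-kitti" is an assumption for demonstration.
#
#     from transformers import GLPNImageProcessor
#
#     image_processor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti")
#     inputs = image_processor(images=image, return_tensors="pt")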
| 76 | 1 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip("""Does not support attention outputs""" )
def A__ ( self :Optional[int] ):
'''simple docstring'''
pass
@unittest.skip
def A__ ( self :Optional[int] ):
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def A__ ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def A__ ( self :int ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support passing input embeds!""" )
def A__ ( self :int ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def A__ ( self :List[Any] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def A__ ( self :List[Any] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def A__ ( self :List[str] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
def A__ ( self :str ):
'''simple docstring'''
pass
@unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
def A__ ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold only has one output format.""" )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("""This test doesn\'t work for ESMFold and doesn\'t test core functionality""" )
def A__ ( self :Optional[int] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support input chunking.""" )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.""" )
def A__ ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn\'t support torchscript compilation.""" )
def A__ ( self :int ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn\'t support torchscript compilation.""" )
def A__ ( self :str ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn\'t support torchscript compilation.""" )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn\'t support data parallel.""" )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ ( self :Dict ):
'''simple docstring'''
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 714 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 367 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
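
# Editor's note: illustrative sketch (not part of the original file). The
# language codes below are assumptions for demonstration; X-MOD keeps one
# adapter per entry in `languages`, and `default_language` picks which adapter
# is used when no language is specified at inference time.
if __name__ == "__main__":
    config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
    print(config.languages, config.adapter_reduction_factor)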
| 694 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
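
# Editor's note: an illustrative sketch (not part of the original script) of
# loading the converted vocoder afterwards; the dump folder path is whatever
# was passed to the conversion above.
#
#     vocoder = SpeechT5HifiGan.from_pretrained("path/to/pytorch_dump_folder")
#     waveform = vocoder(spectrogram)  # log-mel spectrogram in, audio waveform out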
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 694 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
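
# Editor's note: illustrative sketch (not part of the original file) of what the
# `attribute_map` above buys you: framework-generic attribute names are
# transparently aliased onto CTRL's native ones.
if __name__ == "__main__":
    config = CTRLConfig()
    assert config.hidden_size == config.n_embd  # alias resolved via attribute_map
    assert config.num_hidden_layers == config.n_layer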
| 339 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters( self ):
        """The returned offset mapping must line up with the produced tokens."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , """do_lower_case""" ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """Allen"""),
((2_1, 2_3), """##NL"""),
((2_3, 2_4), """##P"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """allen"""),
((2_1, 2_3), """##nl"""),
((2_3, 2_4), """##p"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
    def test_change_tokenize_chinese_chars( self ):
        """With tokenize_chinese_chars=True every Chinese char is a standalone token."""
        list_of_commun_chinese_char = ["""的""", """人""", """有"""]
        text_with_chinese_char = """""".join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                kwargs["""tokenize_chinese_chars"""] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs["""tokenize_chinese_chars"""] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
| 339 | 1 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message: str) -> dict[str, int]:
    """Count occurrences of each uppercase letter in the message."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x: tuple) -> str:
    return x[0]
def get_frequency_order(message: str) -> str:
    """Return the message's letters ordered from most to least frequent."""
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        # break frequency ties by reverse ETAOIN order for a stable result
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    """Score 0-12: how well the message's letter frequencies match English."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
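    # Demo (added; not part of the original module): score a sample sentence.
    # A higher score (out of 12) means letter frequencies closer to typical English.
    sample = "The quick brown fox jumps over the lazy dog"
    print(f"match score for sample text: {english_freq_match_score(sample)}")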
| 219 |
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print('The following activities are selected:')
    # The first activity is always selected
    i = 0
    print(i, end=',')
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
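    # For the sample lists above the greedy scan prints: 0,1,3,4,
    # (activity 0 is always taken; each later activity j is taken only when
    # start[j] >= the finish time of the previously selected activity).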
| 219 | 1 |
"""simple docstring"""
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F'''Unsupported activation function: {act_fn}''' )
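

if __name__ == "__main__":
    # Usage sketch (an addition, not part of the original file): resolve an
    # activation module by name and apply it to a tensor.
    import torch

    act = get_activation("silu")
    print(act(torch.zeros(3)))  # SiLU(0) = 0, so this prints tensor([0., 0., 0.])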
| 703 |
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input')
    modulus = 10**n
    number = 2_84_33 * (pow(2, 7_83_04_57, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
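    # Note (added): the three-argument pow keeps the exponentiation modular, so
    # taking the last n digits stays cheap even though the full number
    # 28433 * 2**7830457 + 1 has roughly 2.36 million decimal digits.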
| 395 | 0 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursive search that checks both ends of the remaining range."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
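    # Added usage sketch: the search walks inward from both ends of the list.
    print(search([1, 3, 5, 7, 9], 7))  # prints 3
    print(search([1, 3, 5], 4))  # prints -1 (key not found)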
| 603 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """Few-shot NER model: scores query token positions against support examples."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1E-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Return start/end position probabilities for each query's tokens."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
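# Rough usage sketch (added; field names read off the code above): `W_query`
# and `W_supports` are tokenizer outputs (dicts of tensors), where `W_supports`
# additionally carries "sizes", "start_token_id" and "end_token_id" entries.
# The forward pass returns, per query, softmax scores over its token positions
# for being an entity start / end:
#   model = FSNERModel()
#   p_starts, p_ends = model(W_query, W_supports)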
| 603 | 1 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Creates train/validation/test `DataLoader`s for one cross-validation fold."""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = DatasetDict(
        {
            'train': dataset['train'].select(train_idxs),
            'validation': dataset['train'].select(valid_idxs),
            'test': dataset['validation'],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets['test'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset('glue', 'mrpc')
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['train'].num_rows), datasets['train']['label'])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=1_00,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print('Average test metrics from all folds:', test_metric)


def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    # New Code #
    parser.add_argument('--num_folds', type=int, default=3, help='The number of splits to perform across the dataset')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
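# How the fold ensembling above works (added note): with `--num_folds 3`,
# `test_predictions` ends up holding three logit tensors of shape
# (num_test_examples, num_labels). Stacking them, summing over the fold axis
# and dividing by 3 averages the logits; the final `argmax(dim=-1)` then turns
# the averaged logits into ensembled class predictions.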
| 409 |
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) .. C(upper_limit)."""
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0')

    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f"The Catalan numbers from 0 through {N} are:")
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
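# For reference (added): the first Catalan numbers are 1, 1, 2, 5, 14, 42, so
# catalan_numbers(5) returns [1, 1, 2, 5, 14, 42].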
| 409 | 1 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
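# Data format sketch (added): the NER/Chunk readers above expect CoNLL-style
# `{mode}.txt` files with one token per line, space-separated columns and
# blank lines between sentences, e.g.
#   EU NNP B-NP B-ORG
#   rejects VBZ B-VP O
# NER reads the last column, Chunk the second-to-last; POS instead parses
# CoNLL-U files via `parse_incr` and uses the "upos" field.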
| 104 |
"""simple docstring"""
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
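    # Added usage sketch: 0 = open cell, 1 = wall. The solver marks the path
    # from the top-left to the bottom-right corner with 1s in `solutions`.
    demo_maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 0, 0],
    ]
    solve_maze(demo_maze)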
| 572 | 0 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 1_01):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i
class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(1_20, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
F'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}
        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 297 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=4_00,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 2_55,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
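        # Worked example (added): with shortest_edge=18, a 30x400 (HxW) image
        # keeps its aspect ratio and resizes to expected_height=18,
        # expected_width=int(18 * 400 / 30) = 240.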
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 13_33})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 3_97_69, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1E-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([3_97_69])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([8_00, 10_66])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1E-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([3_97_69])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 82_28_73
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([8_00, 10_66])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 297 | 1 |
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve defined over a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
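# Quick check (added): for the degree-1 curve through (1, 2) and (3, 5),
# BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5) returns (2.0, 3.5),
# the midpoint of the segment, as expected for linear interpolation.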
| 96 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=1_00,
        encoder_layers=6,
        encoder_ffn_dim=20_48,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=20_48,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=2_56,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
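# Usage sketch (added): the attribute_map plus the properties above make the
# generic names resolve to the DETR-style ones, e.g. with default values:
#   config = TableTransformerConfig()
#   assert config.hidden_size == config.d_model == 2_56
#   assert config.num_attention_heads == config.encoder_attention_heads == 8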
| 600 | 0 |
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    # encode the input string to bytes, then Ascii85-encode those bytes
    return base64.a85encode(string.encode('utf-8'))


def base85_decode(a85encoded: bytes) -> str:
    # reverse of base85_encode
    return base64.a85decode(a85encoded).decode('utf-8')


if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base85_encode(test)
    print(encoded)

    decoded = base85_decode(encoded)
    print(decoded)
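    # Note (added): Ascii85 packs each 4-byte group of input into 5 printable
    # characters, so the encoded text is only ~25% larger than the input
    # (base64, by comparison, adds ~33%). Decoding above recovers the exact
    # original string.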
| 714 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"""Converting {name}...""")

    with torch.no_grad():
        if hidden_sizes == 1_28:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 1_92:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 2_56:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 3_84:
            from_model = timm.create_model('levit_384', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 2_24, 2_24))
        from_model_logits = from_model(x)
        our_model_logits = our_model(x).logits

    assert torch.allclose(from_model_logits, our_model_logits), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 10_00
    expected_shape = (1, num_labels)

    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
    names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 61 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 1_2, 7_6_8))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 1_2, 1_0_2_4))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 44 |
"""simple docstring"""
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
| 633 | 0 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual question answering pipeline: answers open-ended questions about images."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # the inputs are already a dict (or an iterable of dicts) with
            # "image" and "question" keys
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
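

# Minimal usage sketch (assumes a visual-question-answering checkpoint is
# available, e.g. "dandelin/vilt-b32-finetuned-vqa" on the Hub):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering")
#   vqa(image="path/or/url.png", question="How many cats are there?", top_k=2)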
| 531 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table):
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table):
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
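

# Usage sketch: this formatter is what `datasets` dispatches to for the "jax"
# format (assuming `jax` is installed):
#
#   import datasets
#   ds = datasets.Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#   ds[0]["x"]  # jax Array; int32 by default, int64 if jax_enable_x64 is set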
| 531 | 1 |
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of `num` must be multiplied together before one digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of `num` must be summed together before one digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
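    # Worked examples beyond the doctests (a quick sanity check; the values follow
    # directly from the definitions):
    assert multiplicative_persistence(39) == 3  # 39 -> 27 -> 14 -> 4
    assert additive_persistence(199) == 3  # 199 -> 19 -> 10 -> 1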
| 298 |
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
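
# Note on the mapping built in `stretch`: each new gray level follows the classic
# histogram-equalization transfer s_k = (L - 1) * sum_{j<=k} p(r_j) with L = 256,
# rounded to the nearest integer before being applied pixel-wise.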
| 298 | 1 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
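
    # Typical invocation (a sketch; the flags match the parser above, and the
    # script filename is assumed):
    #   python extract_warnings.py --workflow_run_id <run_id> --output_dir ./artifacts \
    #       --token <gh_token> --targets DeprecationWarning,UserWarning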
| 701 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
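

# Usage sketch (assumes the taming-transformers package is installed and that the
# default checkpoint/config paths above exist on disk):
#
#   import torch
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   vqgan = load_vqgan(device)
#   # given x: image batch scaled to [-1, 1], shape (1, 3, H, W)
#   # xrec = reconstruct_with_vqgan(x, vqgan)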
| 422 | 0 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
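

# The leading `2` in the expected token_type_ids above is Funnel-specific: the
# tokenizer gives the [CLS] token its own token type id instead of the usual 0.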
| 89 |
'''simple docstring'''
from math import ceil
def solution(n: int = 1001) -> int:
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
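
# Context: this computes Project Euler problem 28, the sum of both diagonals of an
# n x n number spiral; each ring of side 2*i + 1 contributes 4*(2*i + 1)**2 - 6*(2*i),
# e.g. solution(5) == 101 for the 5 x 5 spiral.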
| 195 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
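

# Usage sketch mirroring the integration tests above (needs a GPU and the
# BAAI/AltDiffusion weights; `strength` in [0, 1] controls how strongly the init
# image is re-noised before denoising):
#
#   pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion").to("cuda")
#   out = pipe(prompt="A fantasy landscape", image=init_image, strength=0.75)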
| 708 |
import os
def solution(filename: str = "matrix.txt") -> int:
    """
    Returns the minimal path sum from the top left to the bottom right of the
    matrix in `filename`, moving only right and down.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
if __name__ == "__main__":
print(f'''{solution() = }''')
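    # A self-contained check of the same recurrence on a tiny in-memory grid
    # (no matrix.txt needed): the cheapest right/down path through
    # [[1, 3], [2, 1]] costs 1 + 2 + 1 = 4.
    tiny = [[1, 3], [2, 1]]
    tiny_dp = [row[:] for row in tiny]
    for k in range(1, len(tiny)):
        tiny_dp[0][k] += tiny_dp[0][k - 1]
        tiny_dp[k][0] += tiny_dp[k - 1][0]
    for r in range(1, len(tiny)):
        for c in range(1, len(tiny)):
            tiny_dp[r][c] += min(tiny_dp[r - 1][c], tiny_dp[r][c - 1])
    assert tiny_dp[-1][-1] == 4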
| 546 | 0 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()

results = {}
# fmt: off
A__: Any = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
A__: str = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
A__: Dict = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
A__: Any = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
A__: List[Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
A__: Tuple = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
A__: Any = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
A__: List[Any] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
A__: int = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
A__: str = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
A__: Dict = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
A__: Any = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
A__: Any = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
A__: List[Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
A__: Union[str, Any] = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f"Started running {mod.modelId}!!!")
if mod.modelId.startswith('''CompVis'''):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1e-3
)
print(f"{mod.modelId} has passed successfully!!!")
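
# Note on the layout above: each reference tensor holds expected logits for one model;
# the assert looks them up in `results` under a key derived from the modelId with "/"
# and "-" replaced by "_" (e.g. "google/ddpm-cifar10-32" -> "google_ddpm_cifar10_32"),
# so each tensor belongs in `results[<key>]` for its corresponding model.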
| 380 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    """Construct a RemBERT tokenizer, backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
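

# Usage sketch (fetches the sentencepiece model from the Hub):
#
#   tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
#   tokenizer("Hello world")["input_ids"]  # [CLS] ... [SEP] around the piece ids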
| 380 | 1 |
def solution(n: int = 1000) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F'''{solution() = }''')
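

# Closed-form cross-check via inclusion-exclusion over multiples of 3, 5 and 15:
# the sum of multiples of k below n is k * m * (m + 1) // 2 with m = (n - 1) // k.
def _closed_form(n: int = 1000) -> int:
    def tri(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return tri(3) + tri(5) - tri(15)


assert _closed_form() == solution() == 233168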
| 701 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 567 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
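

# Usage sketch: the no-argument constructor reproduces the google/fnet-base shape,
# while fnet-large uses hidden_size=1024 and num_hidden_layers=24:
#
#   config = FNetConfig()
#   large_config = FNetConfig(hidden_size=1024, num_hidden_layers=24)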
| 642 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
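
    # Typical invocation (a sketch; the flags match the argparse definition above,
    # and the script filename is assumed):
    #   python convert_reformer_trax_checkpoint_to_pytorch.py \
    #       --trax_model_pkl_path ./model.pkl --config_file ./config.json \
    #       --pytorch_dump_path ./pytorch_model.bin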
| 642 | 1 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 435 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        """
        Register this command to argparse so it's available for the transformer-cli

        Args:
            parser: Root parser to register command-specific arguments
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type,
        tf_checkpoint,
        pytorch_dump_output,
        config,
        finetuning_task_name,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(lowerCAmelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(lowerCAmelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(lowerCAmelCase_ )
if "ckpt" in self._tf_checkpoint.lower():
UpperCamelCase : str = self._tf_checkpoint
UpperCamelCase : Dict = ""
else:
UpperCamelCase : Optional[int] = self._tf_checkpoint
UpperCamelCase : List[Any] = ""
convert_transfo_xl_checkpoint_to_pytorch(
lowerCamelCase , self._config , self._pytorch_dump_output , lowerCamelCase )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(lowerCAmelCase_ )
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(lowerCAmelCase_ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]" )
| 435 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_question_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_reader(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)
    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)
    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
        input_ids = tf.constant(
            [[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]])  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.0323_6253,
                    0.1275_3335,
                    0.1681_8509,
                    0.0027_9786,
                    0.389_6933,
                    0.2426_4945,
                    0.217_8971,
                    -0.0233_5227,
                    -0.0848_1959,
                    -0.1432_4117,
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
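# A hedged inference sketch mirroring the test above; the tokenizer class and
# checkpoint are the customary public ones, shown only for illustration:
# from transformers import DPRQuestionEncoderTokenizer
# tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# embeddings = model(**tokenizer("hello, is my dog cute?", return_tensors="tf"))[0]  # shape (1, 768)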
| 666 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 7_68))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.068_1762, 0.1089_4451, 0.0677_2504],
                    [-0.0642_3668, 0.0236_6615, 0.0432_9344],
                    [-0.0605_7295, 0.0997_4135, -0.0007_0584],
                ]
            ], dtype=tf.float32, )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 666 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    # Load the raw .th state dict onto the CPU.
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    # Rename checkpoint keys to the VisualBert naming scheme, skipping detector weights.
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    '''Copy/paste/tweak the original checkpoint's weights into our VisualBERT structure.'''
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
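# Example invocation (hypothetical paths; the checkpoint file name must be one
# of ACCEPTABLE_CHECKPOINTS above):
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vqa_fine_tuned.th ./visualbert-vqa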
| 310 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7], scaling: Optional[Union[str, bool]] = "mean", num_dynamic_real_features: int = 0, num_static_categorical_features: int = 0, num_static_real_features: int = 0, num_time_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, encoder_layers: int = 2, decoder_layers: int = 2, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, is_encoder_decoder: bool = True, activation_function: str = "gelu", d_model: int = 64, dropout: float = 0.1, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache=True, **kwargs) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
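# A minimal instantiation sketch (illustrative values, not part of the module):
# config = TimeSeriesTransformerConfig(prediction_length=24, num_time_features=2)
# config.context_length  # falls back to prediction_length -> 24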
| 310 | 1 |
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
RESOURCE_FILES_NAMES = {
    '''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
    '''vocab_file''': '''vocab.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
        '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
    },
    '''sentencepiece_model_file''': {
        '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
        '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''ernie-m-base''': 5_1_4,
    '''ernie-m-large''': 5_1_4,
}
PRETRAINED_INIT_CONFIGURATION = {
    '''ernie-m-base''': {'''do_lower_case''': False},
    '''ernie-m-large''': {'''do_lower_case''': False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    '''Construct an ErnieM tokenizer backed by a SentencePiece model.'''
    model_input_names = ['''input_ids''']
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        '''Map each token produced by `tokenize` back to a character span in `text`.'''
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize('NFKC', ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)
    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, text):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        '''Tokenize a string, optionally with SentencePiece subword sampling.'''
        if self.sp_model_kwargs.get('enable_sampling') is True:
            enable_sampling = True
        if self.sp_model_kwargs.get('alpha') is not None:
            alpha = self.sp_model_kwargs.get('alpha')
        if self.sp_model_kwargs.get('nbest_size') is not None:
            nbest_size = self.sp_model_kwargs.get('nbest_size')
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string
    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, 'r', encoding='utf-8') as f:
            for index, line in enumerate(f):
                token = line.rstrip('\n')
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ' Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write(token + '\n')
                index += 1
        tokenizer_model_file = os.path.join(save_directory, 'sentencepiece.bpe.model')
        with open(tokenizer_model_file, 'wb') as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
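# Hedged usage sketch; the checkpoint id mirrors the URL maps above and is
# illustrative only (loading requires the sentencepiece model to be available):
# tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
# tokenizer("Hello world")["input_ids"]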
| 365 |
def __snake_case ( num : int ) -> bool:
    """Return True if a non-negative integer reads the same backwards.

    >>> __snake_case(121)
    True
    >>> __snake_case(-101)
    False
    """
    if num < 0:
        return False
    num_copy : int = num
    rev_num : int = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
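# An equivalent string-based check, shown only for comparison (not part of the
# original module):
def _is_palindrome_via_str(num: int) -> bool:
    return num >= 0 and str(num) == str(num)[::-1]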
if __name__ == "__main__":
import doctest
doctest.testmod()
| 454 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3]) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def _lowercase( self ) -> List[str]:
pass
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("""Swin does not use inputs_embeds""" )
def _lowercase( self ) -> List[str]:
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def _lowercase( self ) -> Any:
pass
def _lowercase( self ) -> List[str]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[str] = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = model_class(_lowerCAmelCase )
UpperCAmelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def _lowercase( self ) -> List[str]:
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _lowercase( self ) -> List[str]:
pass
def _lowercase( self , A , A , A , A ) -> Optional[int]:
UpperCAmelCase : List[Any] = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
UpperCAmelCase : List[Any] = outputs.hidden_states
UpperCAmelCase : Any = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# Swin has a different seq_length
UpperCAmelCase : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase : Any = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : str = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = 3
UpperCAmelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : int = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _lowercase( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase( self ) -> List[str]:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase( self ) -> Tuple:
pass
def _lowercase( self ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t
def check_equivalence(A , A , A , A={} ):
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**_lowerCAmelCase , return_dict=_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase : Optional[Any] = model(**_lowerCAmelCase , return_dict=_lowerCAmelCase , **_lowerCAmelCase ).to_tuple()
def recursive_check(A , A ):
if isinstance(_lowerCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCAmelCase , _lowerCAmelCase ):
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowerCAmelCase ) , set_nan_tensor_to_zero(_lowerCAmelCase ) , atol=1e-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
f''' {torch.isnan(_lowerCAmelCase ).any()} and `inf`: {torch.isinf(_lowerCAmelCase )}. Dict has'''
f''' `nan`: {torch.isnan(_lowerCAmelCase ).any()} and `inf`: {torch.isinf(_lowerCAmelCase )}.'''
) , )
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase : Optional[Any] = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : List[Any] = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : Any = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
UpperCAmelCase : Dict = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : str = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : str = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {"""output_hidden_states""": True} )
UpperCAmelCase : Optional[int] = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
UpperCAmelCase : str = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {"""output_hidden_states""": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
def _lowercase( self ) -> List[str]:
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : str = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
UpperCAmelCase : int = backbone_class(_lowerCAmelCase )
backbone.to(_lowerCAmelCase )
backbone.eval()
UpperCAmelCase : Union[str, Any] = backbone(**_lowerCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _lowerCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
UpperCAmelCase : Dict = backbone(**_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
UpperCAmelCase : str = backbone(**_lowerCAmelCase , output_attentions=_lowerCAmelCase )
self.assertIsNotNone(outputs.attentions )
| 709 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    '''Handles arguments for zero-shot text classification by turning each label into an NLI premise/hypothesis pair.'''
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels
    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template))
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.")
    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`")
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=truncation, )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE, )
            else:
                raise e
        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers.")
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f'''Unable to understand extra arguments {args}''')
        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))
        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
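# Hedged usage sketch via the high-level factory; the NLI checkpoint named here
# is the customary public one and is illustrative, not required by this module:
# from transformers import pipeline
# classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
# classifier("I love hiking in the Alps", candidate_labels=["travel", "cooking"], multi_label=False)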
| 672 | 0 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def UpperCamelCase__ ( self ) -> Optional[int]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 )
        self.assertLess(result["""train_loss"""] ,0.5 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir ,"""epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir ,"""ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def UpperCamelCase__ ( self ) -> int:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["""eval_f1"""] ,28 )
        self.assertGreaterEqual(result["""eval_exact"""] ,28 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir ,"""epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir ,"""qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def UpperCamelCase__ ( self ) -> Optional[Any]:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_accuracy"""] ,0.8 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir ,"""swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_rouge1"""] ,10 )
        self.assertGreaterEqual(result["""eval_rouge2"""] ,2 )
        self.assertGreaterEqual(result["""eval_rougeL"""] ,7 )
        self.assertGreaterEqual(result["""eval_rougeLsum"""] ,7 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir ,"""epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir ,"""summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def UpperCamelCase__ ( self ) -> List[str]:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_bleu"""] ,30 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir ,"""epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir ,"""translation_no_trainer""" ) ) )
@slow
def UpperCamelCase__ ( self ) -> Dict:
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_overall_accuracy"""] ,0.10 )
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # The base model scores a 25%
        self.assertGreaterEqual(result["""eval_accuracy"""] ,0.6 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir ,"""step_1""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir ,"""image_classification_no_trainer""" ) ) )
| 617 |
"""simple docstring"""
from statistics import mean, stdev
def normalization( data : list , ndigits : int = 3 ):
    """simple docstring"""
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def standardization( data : list , ndigits : int = 3 ):
    """simple docstring"""
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
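# Hypothetical usage sketch (not part of the original module); the sample data
# and printed values are illustrative.
if __name__ == "__main__":
    sample = [10, 20, 30, 40, 50]
    print(normalization(sample ) )  # [0.0, 0.25, 0.5, 0.75, 1.0]
    print(standardization(sample ) )  # roughly [-1.265, -0.632, 0.0, 0.632, 1.265]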
| 617 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( string : str , separator : str = " " ):
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
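# Hypothetical usage sketch (not part of the original module):
# lowerCAmelCase_('apple,banana,cherry', ',') -> ['apple', 'banana', 'cherry']
# lowerCAmelCase_('Hello world') -> ['Hello', 'world']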
| 701 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__A : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder( nn.Module ):
'''simple docstring'''
def __init__( self , _a ):
"""simple docstring"""
super().__init__()
        model = torchvision.models.resnet152(pretrained=_a )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[_a.num_image_embeds] )
    def forward( self , _a ):
        """simple docstring"""
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(_a ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class JsonlDataset( Dataset ):
    '''simple docstring'''
    def __init__( self , data_path , tokenizer , transforms , labels , max_seq_length ):
        """simple docstring"""
        self.data = [json.loads(l ) for l in open(data_path )]
        self.data_dir = os.path.dirname(data_path )
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels )
        self.max_seq_length = max_seq_length
        self.transforms = transforms
def __len__( self ):
"""simple docstring"""
return len(self.data )
def __getitem__( self , _a ):
"""simple docstring"""
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[_a]['text'] , add_special_tokens=True ) )
        start_token , sentence , end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[_a]['label']]] = 1
        image = Image.open(os.path.join(self.data_dir , self.data[_a]['img'] ) ).convert('RGB' )
        image = self.transforms(image )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
    def get_label_frequencies( self ):
"""simple docstring"""
        label_freqs = Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def lowerCAmelCase_ ( a : int ):
a__ = [len(row['sentence'] ) for row in batch]
a__ , a__ = len(a ), max(a )
a__ = torch.zeros(a , a , dtype=torch.long )
a__ = torch.zeros(a , a , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(a , a ) ):
a__ = input_row['sentence']
a__ = 1
a__ = torch.stack([row['image'] for row in batch] )
a__ = torch.stack([row['label'] for row in batch] )
a__ = torch.stack([row['image_start_token'] for row in batch] )
a__ = torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms( ):
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
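# Hypothetical usage sketch (not part of the original module); the jsonl path,
# tokenizer choice, and batch size below are assumptions for illustration only.
#
# from torch.utils.data import DataLoader
# from transformers import BertTokenizer
#
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# dataset = JsonlDataset('train.jsonl', tokenizer, get_image_transforms(), get_mmimdb_labels(), max_seq_length=512)
# loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)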
| 126 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFConvBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1) , (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1) , (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1) , (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37)
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config , 'use_cache'):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True)
                saved_model_dir = os.path.join(tmpdirname , 'saved_model' , '1')
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs['encoder_hidden_states']
                    output_attentions = outputs['encoder_attentions']
                else:
                    output_hidden_states = outputs['hidden_states']
                    output_attentions = outputs['attentions']
                self.assertEqual(len(outputs) , num_out)
                expected_num_layers = getattr(
                    self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
                self.assertEqual(len(output_hidden_states) , expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(output_attentions) , self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        self.assertIsNotNone(model)
    def test_attention_outputs( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester , 'key_length' , decoder_seq_length)
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length)
        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2 , 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states , False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict , model_class))
                self.assertEqual(config.output_hidden_states , False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            self.assertEqual(config.output_hidden_states , False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs))
            self.assertEqual(model.config.output_hidden_states , True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest (unittest.TestCase ):
@slow
    def test_inference_masked_lm( self ):
        '''simple docstring'''
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape)
        expected_slice = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
])
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4)
| 8 |
def solution( limit = 50000000 ) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2) )
    primes = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total )
    return len(ret )
if __name__ == "__main__":
print(F"{solution() = }")
| 282 | 0 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def _get_tensors( self , length ):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def test_list_criteria( self ):
        input_ids , scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_length_criteria( self ):
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_new_tokens_criteria( self ):
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def test_max_time_criteria( self ):
        input_ids , scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_validate_stopping_criteria( self ):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
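# Hypothetical usage sketch (not part of the original tests): in practice these
# criteria are passed to generation, e.g.
# model.generate(input_ids, stopping_criteria=StoppingCriteriaList([MaxTimeCriteria(max_time=5.0)]))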
| 714 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url( repo_id : str , path : str , revision : str ) -> None:
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == f"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path )}"""
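# For reference (illustrative, not part of the original test):
# hf_hub_url(repo_id='org-name/dataset-name', path='filename.csv', revision=None)
# -> 'https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename.csv'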
| 599 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 590 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester( TestCasePlus ):
    """simple docstring"""
    @slow
    @require_torch
    def test_finetune_bert2bert( self ):
'''simple docstring'''
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''', '''prajjwal1/bert-tiny''' )
        tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset('''cnn_dailymail''', '''3.0.0''', split='''train[:1%]''' )
        val_dataset = datasets.load_dataset('''cnn_dailymail''', '''3.0.0''', split='''validation[:1%]''' )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['''article'''], padding='''max_length''', truncation=True, max_length=512 )
            outputs = tokenizer(batch['''highlights'''], padding='''max_length''', truncation=True, max_length=128 )
            batch['''input_ids'''] = inputs.input_ids
            batch['''attention_mask'''] = inputs.attention_mask
            batch['''decoder_input_ids'''] = outputs.input_ids
            batch['''labels'''] = outputs.input_ids.copy()
            batch['''labels'''] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
            ]
            batch['''decoder_attention_mask'''] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=['''article''', '''highlights'''], )
        train_dataset.set_format(
            type='''torch''', columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''], )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=['''article''', '''highlights'''], )
        val_dataset.set_format(
            type='''torch''', columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''], )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy='''steps''', do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )
        # start training
        trainer.train()
| 183 | 0 |
def move_tower( height , from_pole , to_pole , with_pole ) -> None:
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk( fp , tp ) -> None:
    print('''moving disk from''' , fp , '''to''' , tp )
def main() -> None:
    height = int(input('''Height of hanoi: ''' ).strip() )
    move_tower(height , '''A''' , '''B''' , '''C''' )
if __name__ == "__main__":
main()
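# Illustrative trace (not in the original file): move_tower(2, 'A', 'B', 'C') prints
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B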
| 346 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split( identifier ) -> list:
    matches = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , identifier )
    return [m.group(0 ) for m in matches]
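# For example (illustrative): camel_case_split('TFBertModel') -> ['TF', 'Bert', 'Model']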
def get_frameworks_table() -> pd.DataFrame:
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''''''.join(camel_case_split(attr_name )[:-1] )
    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()
    data = {'''model_type''': all_models}
    data['''pytorch'''] = [pt_models[t] for t in all_models]
    data['''tensorflow'''] = [tf_models[t] for t in all_models]
    data['''flax'''] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = '''AutoProcessor'''
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = '''AutoTokenizer'''
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = '''AutoFeatureExtractor'''
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = '''AutoTokenizer'''
    data['''processor'''] = [processors[t] for t in all_models]
    return pd.DataFrame(data )
def update_pipeline_and_auto_class_table( table ) -> dict:
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules , auto_classes , model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping ).values():
                if isinstance(name , str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def update_metadata( token , commit_sha ) -> None:
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table )
    resolved_tags_file = hf_hub_download(
        '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=token )
    tags_dataset = Dataset.from_json(resolved_tags_file )
    table = {
        tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
        for i in range(len(tags_dataset ) )
    }
    table = update_pipeline_and_auto_class_table(table )
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys() )
    tags_table = pd.DataFrame(
        {
            '''model_class''': model_classes,
            '''pipeline_tag''': [table[m][0] for m in model_classes],
            '''auto_class''': [table[m][1] for m in model_classes],
        } )
    tags_dataset = Dataset.from_pandas(tags_table )
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir , '''frameworks.json''' ) )
        tags_dataset.to_json(os.path.join(tmp_dir , '''pipeline_tags.json''' ) )
        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = '''Update'''
        upload_folder(
            repo_id='''huggingface/transformers-metadata''' , folder_path=tmp_dir , repo_type='''dataset''' , token=token , commit_message=commit_message , )
def check_pipeline_tags() -> None:
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['''pt''']
            if isinstance(model , (list, tuple) ):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key )
    if len(missing ) > 0:
        msg = ''', '''.join(missing )
        raise ValueError(
            '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
            f"`utils/update_metadata.py`: {msg}. Please add them!" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 346 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_( state_dict ) -> None:
    ignore_keys = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ) -> nn.Linear:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys( state_dict , expert_idx=None ) -> dict:
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('''moe_layer.experts.0''' , f"""ffn.experts.expert_{expert_idx}""" )
            else:
                key = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
        if "gate" in key:
            key = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
        if "fc2" in key and "experts" not in key:
            key = key.replace('''.fc2.''' , '''.ffn.fc2.''' )
        if "fc1" in key and "experts" not in key:
            key = key.replace('''.fc1.''' , '''.ffn.fc1.''' )
        if ".encoder_attn." in key:
            key = key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
        if "encoder_attn_layer_norm" in key:
            key = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
        if "final_layer_norm" in key:
            key = key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly( switch_checkpoint_path , dump_path , num_experts , dtype , weights_name = WEIGHTS_NAME ):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + f"""-rank-{expert}.pt"""
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )['''model''']
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace('''.bin''' , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('''.bin''' , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
    shared_weights = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights['''shared.weight'''] = shared_weights['''decoder.embed_tokens.weight''']
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace('''.bin''' , f"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )
        temp_filename = os.path.join(dump_path , weights_name.replace('''.bin''' , f"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , '''w''' , encoding='''utf-8''' ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + '''\n'''
        f.write(content )
    return metadata, index
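# For reference (illustrative, not part of the original script): the resulting
# index file follows the standard sharded-checkpoint layout, e.g.
# {"metadata": {"total_size": ...},
#  "weight_map": {"decoder.embed_tokens.weight": "pytorch_model-00001-of-000NN.bin", ...}}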
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
    metadata , index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path)
| 75 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original( checkpoint_path , config_path , output_path ):
    config = OmegaConf.load(config_path )
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    keys = list(state_dict.keys() )
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = '''first_stage_model.'''
    for key in keys:
        if key.startswith(first_stage_key ):
            first_stage_dict[key.replace(first_stage_key , '''''' )] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = '''model.diffusion_model.'''
    for key in keys:
        if key.startswith(unet_key ):
            unet_state_dict[key.replace(unet_key , '''''' )] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args ).eval()
    vqvae.load_state_dict(first_stage_dict )
    unet = UNetLDMModel(**unet_init_args ).eval()
    unet.load_state_dict(unet_state_dict )
    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
    pipeline = LDMPipeline(vqvae , unet , scheduler )
    pipeline.save_pretrained(output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 248 | 0 |
import string
def decrypt( message : str ) -> None:
    for key in range(len(string.ascii_uppercase ) ):
        translated = ''''''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"""Decryption using Key #{key}: {translated}""" )
def main() -> None:
    message = input('''Encrypted message: ''' )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
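# Illustrative run (not in the original file): decrypt('KHOOR ZRUOG') prints one
# candidate per key; the line for Key #3 reads 'HELLO WORLD'.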
| 332 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline( DiffusionPipeline ):
    _optional_components = ["melgan"]
    def __init__( self , notes_encoder , continuous_encoder , decoder , scheduler , melgan , ):
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1e-5 )  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    def scale_features( self , features , output_range=(-1.0, 1.0) , clip=False ):
        min_out , max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features( self , outputs , input_range=(-1.0, 1.0) , clip=False ):
        min_out , max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode( self , input_tokens , continuous_inputs , continuous_mask ):
        tokens_mask = input_tokens > 0
        tokens_encoded , tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask )
        continuous_encoded , continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode( self , encodings_and_masks , input_tokens , noise_time ):
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps )
return logits
    @torch.no_grad()
    def __call__(
        self,
        input_tokens,
        generator=None,
        num_inference_steps=100,
        return_dict=True,
        output_type="numpy",
        callback=None,
        callback_steps=1,
    ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
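# Hedged usage sketch (checkpoint name and MidiProcessor are taken from the diffusers
# documentation for this pipeline; the MIDI file name is illustrative):
#   from diffusers import SpectrogramDiffusionPipeline, MidiProcessor
#   pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#   processor = MidiProcessor()
#   output = pipe(processor("beethoven_hammerklavier_2.mid"))
#   audio = output.audios[0]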
| 332 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
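# Hedged usage sketch of the validation above (illustrative values, not a released
# checkpoint configuration):
#   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # OK
#   GPTNeoXConfig(rope_scaling={"type": "unknown", "factor": 2.0})          # raises ValueError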
| 581 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so mutate through __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
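# Hedged sketch of align_with_features (hypothetical features, for illustration only):
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#   template = AudioClassification().align_with_features(features)
#   template.label_schema["labels"].names  # -> ["cat", "dog"]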
| 455 | 0 |
"""simple docstring"""
def method_a(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
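    # Hedged sanity check (not in the original script): for f(x) = x**2 on
    # [0.0, 1.0] with 10 steps, the extended trapezoidal rule returns about
    # 0.335, close to the exact integral 1/3.
    assert abs(method_a([0.0, 1.0], 10.0) - 1 / 3) < 2e-3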
| 404 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
snake_case = None
snake_case = logging.get_logger(__name__)
snake_case = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
snake_case = {
'''google/bigbird-roberta-base''': 4_0_9_6,
'''google/bigbird-roberta-large''': 4_0_9_6,
'''google/bigbird-base-trivia-itc''': 4_0_9_6,
}
snake_case = '''▁'''
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
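# Hedged usage sketch (downloads tokenizer files from the Hub):
#   tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#   tokenizer("Hello world")["input_ids"]  # starts with the [CLS] id, ends with [SEP]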
| 404 | 1 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)
middle_aged = fuzz.membership.trimf(X, abc2)

# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
alg_product = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
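# Hedged check (illustrative, not in the original script): with the shared universe
# X above, fuzzy union/intersection should reduce to elementwise max/min:
#   assert np.allclose(union, np.maximum(young, middle_aged))
#   assert np.allclose(intersection, np.minimum(young, middle_aged))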
| 182 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
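# Typical downstream import from this re-export module (illustrative):
#   from transformers.data.processors import SquadV2Processor, squad_convert_examples_to_features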
| 182 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""PerceiverFeatureExtractor"""]
__UpperCAmelCase = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
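# Illustrative effect of the _LazyModule pattern (hypothetical session):
#   import transformers.models.perceiver as perceiver  # cheap; heavy deps not imported yet
#   model_cls = perceiver.PerceiverForMaskedLM         # first access triggers the torch import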
| 79 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    """Directed graph stored as an adjacency dict {vertex: [[weight, vertex], ...]}."""

    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional and repetition is handled
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    # if no destination is meant, the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leaving it (or passing -1) picks a random
    # count between 10 and 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
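# Illustrative use of DirectedGraph (example values, not part of the original file):
#   g = DirectedGraph()
#   g.add_pair(1, 2)
#   g.add_pair(2, 3)
#   g.add_pair(3, 1)
#   g.dfs(1, 3)       # -> [1, 2, 3]
#   g.has_cycle()     # -> True (the 1 -> 2 -> 3 -> 1 loop)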
class Graph:
    """Undirected graph stored as an adjacency dict {vertex: [[weight, vertex], ...]}."""

    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional and repetition is handled
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    # if no destination is meant, the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leaving it (or passing -1) picks a random
    # count between 10 and 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
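# And the undirected variant (illustrative):
#   ug = Graph()
#   ug.add_pair(1, 2, 7)
#   ug.degree(1)      # -> 1
#   ug.all_nodes()    # -> [1, 2]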
| 79 | 1 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
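# The tests above assume a `sqlite_path` fixture pointing at a DB with a 4-row
# `dataset` table; a minimal stand-in could look like this (an assumption, the
# real fixture lives elsewhere in the test suite):
#   import sqlite3
#   con = sqlite3.connect(str(tmp_path / "fixture.sqlite"))
#   con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
#   con.executemany("INSERT INTO dataset VALUES (?, ?, ?)", [(str(i), i, float(i)) for i in range(4)])
#   con.commit()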
| 583 |
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day():
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes():
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
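# Example response shape from the public zenquotes.io API (abridged; the field
# names are the API's own and may change):
#   [{"q": "<quote text>", "a": "<author>", "h": "<html markup>"}]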
| 583 | 1 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph, root):
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph, root):
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Placeholder for doctest-based checks."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
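    # Illustrative MST run (edge values chosen for this example, not from the
    # original file): vertices 1-2 (weight 5) and 2-3 (weight 1).
    x = [Vertex(n) for n in range(3)]
    connect(x, 1, 2, 5)
    connect(x, 2, 3, 1)
    assert prim(x[:], x[0]) == [(2, 1), (3, 2)]
    assert list(prim_heap(x[:], x[0])) == [(2, 1), (3, 2)]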
| 10 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
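# Hedged usage sketch (downloads the SentencePiece model from the Hub):
#   tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   tok.tokenize("Hello world")  # SentencePiece pieces, e.g. ['▁Hello', '▁world']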
| 10 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__(self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any=13 , _UpperCAmelCase : str=7 , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Any=False , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Optional[int]=99 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Dict="last" , _UpperCAmelCase : Any=None , _UpperCAmelCase : int=None , ) -> int:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_lengths
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = gelu_activation
lowercase__ = sinusoidal_embeddings
lowercase__ = causal
lowercase__ = asm
lowercase__ = n_langs
lowercase__ = vocab_size
lowercase__ = n_special
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = summary_type
lowercase__ = use_proj
lowercase__ = scope
def lowerCamelCase__ (self : List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_input_lengths:
lowercase__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ = ids_tensor([self.batch_size] , 2 ).float()
lowercase__ = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCamelCase__ (self : int ) -> Dict:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = FlaubertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , lengths=_UpperCAmelCase , langs=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , langs=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = FlaubertWithLMHeadModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = FlaubertForQuestionAnsweringSimple(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ (self : str , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , ) -> List[Any]:
"""simple docstring"""
lowercase__ = FlaubertForQuestionAnswering(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , p_mask=_UpperCAmelCase , )
lowercase__ = model(
_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , )
((lowercase__) , ) = result_with_labels.to_tuple()
lowercase__ = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
((lowercase__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , ) -> List[str]:
"""simple docstring"""
lowercase__ = FlaubertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ (self : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , ) -> str:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = FlaubertForTokenClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_flaubert_multiple_choice(self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp(self ):
        """simple docstring"""
        self.model_tester = FlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
    def test_config(self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_flaubert_model(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
    def test_flaubert_lm_head(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
    def test_flaubert_simple_qa(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs )
    def test_flaubert_qa(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
    def test_flaubert_sequence_classif(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
    def test_flaubert_token_classif(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs )
    def test_flaubert_multiple_choice(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        """simple docstring"""
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
    def test_torchscript_device_change(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , "traced_model.pt" ) )
                loaded = torch.jit.load(os.path.join(tmp , "traced_model.pt" ) , map_location=torch_device )
                loaded(inputs_dict["input_ids"].to(torch_device ) , inputs_dict["attention_mask"].to(torch_device ) )
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase ):
    '''simple docstring'''
@slow
    def test_inference_no_head_absolute_embedding(self ):
        """simple docstring"""
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
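# A hedged usage sketch of the heads the checks above exercise; the checkpoint
# name comes from the integration test, everything else is illustrative.
import torch
from transformers import FlaubertForQuestionAnsweringSimple, FlaubertTokenizer

tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = FlaubertForQuestionAnsweringSimple.from_pretrained("flaubert/flaubert_base_cased")
inputs = tokenizer("Qui est Victor Hugo ?", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# One start/end logit per input token, mirroring the shape assertions above.
assert outputs.start_logits.shape == inputs["input_ids"].shape
assert outputs.end_logits.shape == inputs["input_ids"].shape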
| 15 |
from manim import *
class A_ ( Scene ):
    """simple docstring"""
    def construct( self ) -> None:
        mem = Rectangle(height=0.5 ,width=0.5 )
        fill = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
        meta_mem = Rectangle(height=0.25 ,width=0.25 )
        cpu_left_col_base = [mem.copy() for i in range(6 )]
        cpu_right_col_base = [mem.copy() for i in range(6 )]
        cpu_left_col = VGroup(*cpu_left_col_base ).arrange(UP ,buff=0 )
        cpu_right_col = VGroup(*cpu_right_col_base ).arrange(UP ,buff=0 )
        cpu_rects = VGroup(cpu_left_col ,cpu_right_col ).arrange(RIGHT ,buff=0 )
        cpu_text = Text('CPU' ,font_size=24 )
        cpu = Group(cpu_rects ,cpu_text ).arrange(DOWN ,buff=0.5 ,aligned_edge=DOWN )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(cpu )
        gpu_base = [mem.copy() for i in range(4 )]
        gpu_rect = VGroup(*gpu_base ).arrange(UP ,buff=0 )
        gpu_text = Text('GPU' ,font_size=24 )
        gpu = Group(gpu_rect ,gpu_text ).arrange(DOWN ,buff=0.5 ,aligned_edge=DOWN )
        gpu.move_to([-1, -1, 0] )
        self.add(gpu )
        model_base = [mem.copy() for i in range(6 )]
        model_rect = VGroup(*model_base ).arrange(RIGHT ,buff=0 )
        model_text = Text('Model' ,font_size=24 )
        model = Group(model_rect ,model_text ).arrange(DOWN ,buff=0.5 ,aligned_edge=DOWN )
        model.move_to([3, -1.0, 0] )
        self.add(model )
        model_arr = []
        model_cpu_arr = []
        for i, rect in enumerate(model_base ):
            target = fill.copy().set_fill(BLUE ,opacity=0.8 )
            target.move_to(rect )
            model_arr.append(target )
            cpu_target = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(BLUE ,opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(cpu_target )
        self.add(*model_arr ,*model_cpu_arr )
        disk_left_col_base = [meta_mem.copy() for i in range(6 )]
        disk_right_col_base = [meta_mem.copy() for i in range(6 )]
        disk_left_col = VGroup(*disk_left_col_base ).arrange(UP ,buff=0 )
        disk_right_col = VGroup(*disk_right_col_base ).arrange(UP ,buff=0 )
        disk_rects = VGroup(disk_left_col ,disk_right_col ).arrange(RIGHT ,buff=0 )
        disk_text = Text('Disk' ,font_size=24 )
        disk = Group(disk_rects ,disk_text ).arrange(DOWN ,buff=0.5 ,aligned_edge=DOWN )
        disk.move_to([-4, -1.25, 0] )
        self.add(disk_text ,disk_rects )
        key = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        key_text = MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
        key_text.move_to([-5, 2.4, 0] )
        self.add(key_text ,key )
        blue_text = MarkupText(
            F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
        blue_text.next_to(key_text ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
        self.add(blue_text )
        step_a = MarkupText(
            F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
        step_a.move_to([2, 2, 0] )
        self.play(Write(step_a ) )
        input = Square(0.3 )
        input.set_fill(RED ,opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] ,LEFT ,buff=0.5 )
        self.play(Write(input ) )
        input.generate_target()
        input.target.next_to(model_arr[0] ,direction=LEFT ,buff=0.02 )
        self.play(MoveToTarget(input ) )
        self.play(FadeOut(step_a ) )
        a = Arrow(start=UP ,end=DOWN ,color=RED ,buff=0.5 )
        a.next_to(model_arr[0].get_left() ,UP ,buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        step_a = MarkupText(
            F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
        step_a.move_to([2, 2, 0] )
        self.play(Write(step_a ,run_time=3 ) )
        circ_kwargs = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
        self.play(
            Write(a ) ,Circumscribe(model_arr[0] ,color=ORANGE ,**circ_kwargs ) ,Circumscribe(model_cpu_arr[0] ,color=ORANGE ,**circ_kwargs ) ,Circumscribe(gpu_rect[0] ,color=ORANGE ,**circ_kwargs ) ,)
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        a_c = a.copy()
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 ,UP ,buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            grp = AnimationGroup(
                FadeOut(a_c ,run_time=0.5 ) ,MoveToTarget(input ,run_time=0.5 ) ,FadeIn(a_c ,run_time=0.5 ) ,lag_ratio=0.2 )
            self.play(grp )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    circ_kwargs['run_time'] = 0.7
                self.play(
                    Circumscribe(model_arr[i] ,**circ_kwargs ) ,Circumscribe(cpu_left_col_base[i] ,**circ_kwargs ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=ORANGE ,**circ_kwargs ) ,Circumscribe(gpu_rect[0] ,color=ORANGE ,**circ_kwargs ) ,Circumscribe(model_arr[i + 1] ,color=ORANGE ,**circ_kwargs ) ,)
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] ,color=ORANGE ,**circ_kwargs ) ,Circumscribe(cpu_left_col_base[-1] ,color=ORANGE ,**circ_kwargs ) ,Circumscribe(gpu_rect[0] ,color=ORANGE ,**circ_kwargs ) ,)
                self.play(MoveToTarget(model_cpu_arr[i] ) )
        a = a_c
        a_c = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
        self.play(
            FadeOut(a ) ,FadeOut(a_c ,run_time=0.5 ) ,)
        step_a = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(step_a ,run_time=3 ) ,MoveToTarget(input ) )
self.wait()
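# A self-contained sketch of the generate_target()/MoveToTarget pattern the
# scene above leans on (names here are illustrative, not from the original file):
from manim import RIGHT, MoveToTarget, Scene, Square

class MoveDemo(Scene):
    def construct(self):
        box = Square(0.3)
        self.add(box)
        box.generate_target()         # snapshot a copy as box.target
        box.target.shift(RIGHT * 2)   # mutate only the target
        self.play(MoveToTarget(box))  # animate box into its target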
| 67 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class a_ ( unittest.TestCase ):
"""simple docstring"""
    def test_gelu_versions( self ):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('''gelu''' )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
    def test_gelu_10( self ):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('''gelu''' )
        geluaa = get_activation('''gelu_10''' )
        y_gelu = torch_builtin(x )
        y_gelu_aa = geluaa(x )
        clipped_mask = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_aa ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
    def test_get_activation( self ):
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
        with self.assertRaises(KeyError ):
            get_activation('''bogus''' )
        with self.assertRaises(KeyError ):
            get_activation(None )
    def test_activations_are_distinct_objects( self ):
        acta = get_activation('''gelu''' )
        acta.a = 1
        actb = get_activation('''gelu''' )
        self.assertEqual(acta.a , 1 )
        # each call returns a fresh instance, so the attribute must not leak
        with self.assertRaises(AttributeError ):
            _ = actb.a
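# Quick usage sketch for get_activation, mirroring the checks above.
import torch
from transformers.activations import get_activation

gelu = get_activation("gelu")
x = torch.linspace(-3.0, 3.0, steps=7)
print(gelu(x))
# Unknown names raise KeyError, as asserted above: get_activation("bogus")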
| 333 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    chkpt = torch.load(xlm_checkpoint_path , map_location='''cpu''' )
    state_dict = chkpt['''model''']
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict['''transformer.''' + k] = v
    config = chkpt['''params''']
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt['''dico_word2id''']
    vocab = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(config , indent=2 ) + '''\n''' )
    print(F"""Save vocab file to {pytorch_vocab_dump_path}""" )
    with open(pytorch_vocab_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(vocab , indent=2 ) + '''\n''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
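# Usage sketch (illustrative file paths; the script filename below is an
# assumption, not taken from this file):
#   python convert_xlm_checkpoint.py --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-dump
# Once converted, the dump folder loads like any local checkpoint:
from transformers import XLMWithLMHeadModel

model = XLMWithLMHeadModel.from_pretrained("./xlm-dump")
print(model.config.emb_dim)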
| 333 | 1 |
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = R'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC ):
    '''simple docstring'''
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) -> bool:
        """simple docstring"""
        raise NotImplementedError('StoppingCriteria needs to be subclassed' )
class MaxLengthCriteria(StoppingCriteria ):
    '''simple docstring'''
    def __init__( self , max_length : int , max_position_embeddings : Optional[int] = None ):
        """simple docstring"""
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) -> bool:
        """simple docstring"""
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                'This is a friendly reminder - the current text generation call will exceed the model\'s predefined '
                F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
                'exceptions, performance degradation, or nothing at all.' )
        return is_done
class MaxNewTokensCriteria(StoppingCriteria ):
    '''simple docstring'''
    def __init__( self , start_length : int , max_new_tokens : int ):
        """simple docstring"""
        warnings.warn(
            'The class `MaxNewTokensCriteria` is deprecated. '
            F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
            'with `max_length = start_length + max_new_tokens` instead.' , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) -> bool:
        """simple docstring"""
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria(StoppingCriteria ):
    '''simple docstring'''
    def __init__( self , max_time : float , initial_timestamp : Optional[float] = None ):
        """simple docstring"""
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) -> bool:
        """simple docstring"""
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList(list ):
    '''simple docstring'''
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) -> bool:
        """simple docstring"""
        return any(criteria(input_ids , scores ) for criteria in self )
    @property
    def max_length( self ) -> Optional[int]:
        """simple docstring"""
        for stopping_criterium in self:
            if isinstance(stopping_criterium , MaxLengthCriteria ):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium , MaxNewTokensCriteria ):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria(stopping_criteria : StoppingCriteriaList , max_length : int ) -> StoppingCriteriaList:
    """simple docstring"""
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter' , UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria
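# A minimal sketch of composing the criteria defined above in a
# StoppingCriteriaList; the tensors are dummies.
import torch

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
dummy_input_ids = torch.ones((1, 25), dtype=torch.long)
dummy_scores = torch.zeros((1, 100))
print(criteria(dummy_input_ids, dummy_scores))  # True: 25 tokens >= max_length of 20
print(criteria.max_length)                      # 20, via the max_length property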
| 580 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2_000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4_000 , return_attention_mask=False , do_normalize=True , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self ):
        """simple docstring"""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        """simple docstring"""
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    '''simple docstring'''
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self ):
        """simple docstring"""
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
    def test_feat_extract_from_and_save_pretrained( self ):
        """simple docstring"""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ):
        """simple docstring"""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , 'feat_extract.json' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ):
        """simple docstring"""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs , padding='max_length' , return_tensors='np' ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        encoded_sequences_a = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
        encoded_sequences_b = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
        self.assertTrue(np.allclose(encoded_sequences_a , encoded_sequences_b , atol=1E-3 ) )
        # Test batched
        encoded_sequences_a = feature_extractor(speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_b = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        for enc_seq_a, enc_seq_b in zip(encoded_sequences_a , encoded_sequences_b ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_a = feature_extractor(speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_b = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        for enc_seq_a, enc_seq_b in zip(encoded_sequences_a , encoded_sequences_b ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
        # Test truncation required
        speech_inputs = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
        encoded_sequences_a = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_b = feature_extractor(np_speech_inputs_truncated , return_tensors='np' ).input_features
        for enc_seq_a, enc_seq_b in zip(encoded_sequences_a , encoded_sequences_b ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
    def test_double_precision_pad( self ):
        """simple docstring"""
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
        """simple docstring"""
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        """simple docstring"""
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors='pt' ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3_000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , EXPECTED_INPUT_FEATURES , atol=1E-4 ) )
    def test_zero_mean_unit_variance_normalization( self ):
        """simple docstring"""
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1E-3 ) )
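# Basic usage sketch: featurize one second of synthetic audio at 16 kHz with the
# default extractor (which pads/truncates to 30 s: 80 mel bins x 3000 frames).
import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()
waveform = np.zeros(16_000, dtype=np.float32)  # 1 s of silence
features = feature_extractor(waveform, sampling_rate=16_000, return_tensors="np").input_features
assert features.shape == (1, 80, 3_000)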
| 580 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
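# With the lazy structure above, importing the package is cheap: the heavy
# torch-backed submodules are only loaded on first attribute access.
# Illustrative usage once transformers is installed:
from transformers import JukeboxConfig  # resolved through the _LazyModule machinery

config = JukeboxConfig()
print(type(config).__name__)  # "JukeboxConfig"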
| 711 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase ):
    """simple docstring"""
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
    def test_canny( self ):
        """simple docstring"""
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-canny''' , from_pt=True , dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params['''controlnet'''] = controlnet_params
        prompts = '''bird'''
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        canny_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 2_53:2_56, 2_53:2_56, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def test_pose( self ):
        """simple docstring"""
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-openpose''' , from_pt=True , dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params['''controlnet'''] = controlnet_params
        prompts = '''Chef in the kitchen'''
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        pose_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 2_53:2_56, 2_53:2_56, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
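# The replicate()/shard() calls above set up standard jax.pmap data parallelism.
# A minimal sketch of the same pattern on a toy function (no diffusers required):
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

toy_params = {"w": jnp.ones((2,))}                 # one copy of the parameters
p_toy_params = replicate(toy_params)               # adds a leading device axis
batch = jnp.ones((jax.device_count() * 4, 2))      # global batch
sharded = shard(batch)                             # -> (n_devices, 4, 2)
out = jax.pmap(lambda p, x: x @ p["w"])(p_toy_params, sharded)
print(out.shape)                                   # (n_devices, 4)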
| 575 | 0 |
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = 'pytorch_model.bin'
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."})
    cache_dir: Optional[str] = dataclasses.field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None , metadata={"help": "A csv or a json file containing the validation data."})
    task_name: Optional[str] = dataclasses.field(
        default=None , metadata={"help": "The name of the task to train on."} , )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None , metadata={"help": "The list of labels for the task."})
@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."})
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy" , metadata={"help": "The evaluation metric used for the task."})
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no" , metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
        } , )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        } , )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=1_00 , metadata={"help": "Number of self-training iterations to run."} , )
    seed: Optional[int] = dataclasses.field(
        default=None , metadata={"help": "Random seed for initialization."} , )
def create_pseudo_labeled_data(args , infer_input , infer_output , eval_result , id2label , next_data_dir ):
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1)
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort('probability' , reverse=True)
        dataset = dataset.select(range(num_selected_rows))
    dataset = dataset.remove_columns(['label', 'probability'])
    dataset = dataset.rename_column('prediction' , 'label')
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)
    pseudo_labeled_data_file = os.path.join(next_data_dir , f'''train_pseudo.{args.data_file_extension}''')
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path , train_file , infer_file , output_dir , **kwargs ):
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file , infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args , key , value)
    for key, value in kwargs.items():
        if hasattr(args , key):
            setattr(args , key , value)
    # Sanity checks
    data_files = {}
    args.data_file_extension = None
    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files['train'] = args.train_file
    data_files['infer'] = args.infer_file
    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files['eval'] = args.eval_file
    for key in data_files:
        extension = data_files[key].split('.')[-1]
        assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.'''
    assert (
        args.eval_metric in datasets.list_metrics()
    ), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)
    logger.info('Creating the initial data directory for self-training...')
    data_dir_format = f'''{args.output_dir}/self-train_iter-{{}}'''.format
    initial_data_dir = data_dir_format(0)
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir , exist_ok=True)
        os.makedirs(initial_data_dir , exist_ok=True)
    accelerator.wait_for_everyone()
    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations) , disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0 , int(args.max_selftrain_iterations)):
        data_dir = data_dir_format(iteration)
        assert os.path.exists(data_dir)
        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(data_dir , 'stage-1')
        arguments_dict = {
            'accelerator': accelerator,
            'model_name_or_path': args.model_name_or_path,
            'cache_dir': args.cache_dir,
            'do_train': True,
            'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
            'do_eval': True if args.eval_file is not None else False,
            'eval_file': data_files['eval'],
            'do_predict': True,
            'infer_file': data_files['infer'],
            'task_name': args.task_name,
            'label_list': args.label_list,
            'output_dir': current_output_dir,
            'eval_metric': args.eval_metric,
            'evaluation_strategy': args.evaluation_strategy,
            'early_stopping_patience': args.early_stopping_patience,
            'early_stopping_threshold': args.early_stopping_threshold,
            'seed': args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args , key):
                arguments_dict.update({key: value})
        model_bin_file_path = os.path.join(current_output_dir , 'best-checkpoint' , MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , model_bin_file_path , iteration , )
        else:
            logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info('Self-training job completed: iteration: %d, stage: 1.' , iteration)
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir , 'best-checkpoint')
            current_output_dir = os.path.join(data_dir , 'stage-2')
            # Update arguments_dict
            arguments_dict['model_name_or_path'] = model_path
            arguments_dict['train_file'] = data_files['train']
            arguments_dict['output_dir'] = current_output_dir
            model_bin_file_path = os.path.join(current_output_dir , 'best-checkpoint' , MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , model_bin_file_path , iteration , )
            else:
                logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info('Self-training job completed: iteration: %d, stage: 2.' , iteration)
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)
        config = AutoConfig.from_pretrained(os.path.join(current_output_dir , 'best-checkpoint'))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir , 'eval_results_best-checkpoint.json')
        test_results_file = os.path.join(current_output_dir , 'test_results_best-checkpoint.json')
        assert os.path.exists(eval_results_file)
        with open(eval_results_file , 'r') as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir , 'infer_output_best-checkpoint.csv')
        assert os.path.exists(infer_output_file)
        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension , data_files={'data': data_files['infer']})['data']
        infer_output = load_dataset('csv' , data_files={'data': infer_output_file})['data']
        if accelerator.is_main_process:
            os.makedirs(next_data_dir , exist_ok=True)
            shutil.copy(eval_results_file , os.path.join(output_dir , f'''eval_results_iter-{iteration}.json'''))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file , os.path.join(output_dir , f'''test_results_iter-{iteration}.json'''))
            create_pseudo_labeled_data(args , infer_input , infer_output , eval_result , id2label , next_data_dir)
        accelerator.wait_for_everyone()
        data_files['train_pseudo'] = os.path.join(next_data_dir , f'''train_pseudo.{args.data_file_extension}''')
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result
            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1
                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
        progress_bar.update(1)
        if should_training_stop:
            break
    if best_iteration is not None:
        # Save the best iteration
        logger.info('Best iteration: %d' , best_iteration)
        logger.info('Best evaluation result: %s = %f' , args.eval_metric , best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir , f'''eval_results_iter-{best_iteration}.json''') , os.path.join(output_dir , 'eval_results_best-iteration.json') , )
    else:
        # Assume that the last iteration is the best
        logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1)
        logger.info('Best evaluation result: %s = %f' , args.eval_metric , eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''') , os.path.join(output_dir , 'eval_results_best-iteration.json') , )
| 3 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs( self , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(seed ) )
        generator = torch.manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def test_pipeline_default( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1
    def test_pipeline_pndm( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_dpm_multistep( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler_ancestral( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase ):
    '''simple docstring'''
    @property
    def gpu_provider( self ):
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self ):
        '''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((1_28, 1_28) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A fantasy landscape, trending on artstation'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='np' , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def test_inference_k_lms( self ):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((1_28, 1_28) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=lms_scheduler , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A fantasy landscape, trending on artstation'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type='np' , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
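# A minimal end-to-end sketch of the pipeline exercised above (CPU provider and
# checkpoint/image URL as used in the tests; runtime will be slow on CPU):
from diffusers import OnnxStableDiffusionUpscalePipeline
from diffusers.utils import load_image

pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
)
low_res = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
).resize((128, 128))
image = pipe(prompt="A fantasy landscape, trending on artstation", image=low_res, num_inference_steps=10).images[0]
image.save("upscaled.png")  # 512x512 output (4x upscale)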
| 329 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
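# (Illustrative invocations, assuming this file is saved as multi_process_metrics.py;
# adjust the script name to your checkout.)
#   python multi_process_metrics.py                       # single CPU / single GPU
#   torchrun --nproc_per_node 2 multi_process_metrics.py  # multi-GPU
#   accelerate launch multi_process_metrics.py            # via the accelerate CLI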
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders ( accelerator : Accelerator , batch_size : int = 16 ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
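# Worked example for the collate_fn above (illustrative): with mixed_precision="fp16"
# and a longest sequence of 53 tokens, pad_to_multiple_of=8 pads the batch to 56
# tokens; on TPU every batch is instead padded to the fixed length 128 so XLA can
# compile a single static shape.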
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function ( config , args ):
    '''simple docstring'''
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , None ) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    metric = evaluate.load("glue" , "mrpc" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=1_00 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather((predictions, batch["labels"]) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"""epoch {epoch}:""" , eval_metric )
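# (Sketch, assuming accelerate >= 0.12.) The manual truncation in the eval loop above
# collapses to a single call:
#
#     predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#     metric.add_batch(predictions=predictions, references=references)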
def main ( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. "
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 150 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
consumer_key = """"""
consumer_secret = """"""
access_key = """"""
access_secret = """"""
def get_all_tweets ( screen_name : str ) -> None:
    '''simple docstring'''
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=2_00 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F"""getting tweets before {oldest}""" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=2_00 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F"""...{len(alltweets )} tweets downloaded so far""" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F"""new_{screen_name}_tweets.csv""" , "w" ) as f:
        writer = csv.writer(f )
        writer.writerow(["id", "created_at", "text"] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
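# (Illustrative follow-up, not in the original script.) The dump written above can be
# read back with the same csv module, e.g.:
#
#     with open("new_FirePing32_tweets.csv") as f:
#         rows = list(csv.reader(f))[1:]  # skip the ["id", "created_at", "text"] header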
| 150 | 1 |
"""simple docstring"""
import argparse
import datetime
def zeller ( date_input : str ) -> str:
    '''simple docstring'''
    days = {
        """0""": """Sunday""",
        """1""": """Monday""",
        """2""": """Tuesday""",
        """3""": """Wednesday""",
        """4""": """Thursday""",
        """5""": """Friday""",
        """6""": """Saturday""",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 1_1:
        raise ValueError("""Must be 10 characters long""" )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 1_3:
        raise ValueError("""Month must be between 1 - 12""" )
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 3_2:
        raise ValueError("""Date must be between 1 - 31""" )
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 4_5 < y < 8_5_0_0:
        raise ValueError(
            """Year out of range. There has to be some sort of limit...right?""" )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 1_2
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
    # Response
    response = F'Your date {date_input}, is a {days[str(f )]}!'
    return response
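# Worked example (illustrative): for "01-01-2000" the steps above give m=13, y=1999,
# c=19, k=99, t=28, u=4, v=24, x=100, z=156, w=118 and f = 118 % 7 = 6, i.e.
# "Saturday" - which agrees with datetime.date(2000, 1, 1) falling on a Saturday.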
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
    args = parser.parse_args()
zeller(args.date_input)
| 58 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_vivit'''] = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vivit'''] = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
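# (Illustrative note, assuming this file lives at transformers/models/vivit/__init__.py.)
# With the _LazyModule registration above, `from transformers.models.vivit import
# VivitConfig` stays cheap: the torch-backed modeling code is only imported on first
# attribute access, e.g. when VivitModel is actually touched.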
| 58 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
UpperCamelCase = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class BlipaVisionConfig ( PretrainedConfig ):
    model_type = '''blip_2_vision_model'''
    def __init__(self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=0.0_0001 , attention_dropout=0.0 , initializer_range=1e-10 , qkv_bias=True , **kwargs , ) -> int:
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
    def from_pretrained (cls , pretrained_model_name_or_path , **kwargs ) -> Optional[int]:
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class BlipaQFormerConfig ( PretrainedConfig ):
    model_type = '''blip_2_qformer'''
    def __init__(self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1408 , **kwargs , ) -> List[Any]:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
    def from_pretrained (cls , pretrained_model_name_or_path , **kwargs ) -> int:
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict["""qformer_config"""]
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class BlipaConfig ( PretrainedConfig ):
    model_type = '''blip-2'''
    is_composition = True
    def __init__(self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ) -> List[Any]:
        """simple docstring"""
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
        self.vision_config = BlipaVisionConfig(**vision_config )
        self.qformer_config = BlipaQFormerConfig(**qformer_config )
        text_model_type = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
@classmethod
    def from_vision_qformer_text_configs (cls , vision_config , qformer_config , text_config , **kwargs , ) -> Any:
        """simple docstring"""
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict (self ) -> int:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""qformer_config"""] = self.qformer_config.to_dict()
        output["""text_config"""] = self.text_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 708 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowerCamelCase__ ( PretrainedConfig ):
    model_type = 'gpt_neox'
    def __init__(self , vocab_size=5_0432 , hidden_size=6144 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=2_4576 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=1_0000 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!' )
    def _rope_scaling_validation(self ) -> None:
        """simple docstring"""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
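# (Illustrative.) A rope_scaling value that passes the validation above:
#     config = lowerCamelCase__(rope_scaling={"type": "linear", "factor": 2.0})
# whereas {"type": "ntk", "factor": 2.0} or {"type": "linear", "factor": 1.0}
# would raise a ValueError.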
| 144 | 0 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
 - **zero_division** (`"warn"`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
"""simple docstring"""
    def _info ( self ) -> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
    def _compute ( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ) -> Optional[int]:
        """simple docstring"""
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 268 |
from math import isqrt, log2
def calculate_prime_numbers ( max_number : int ):
    """simple docstring"""
    is_prime = [True] * max_number
    for i in range(2 ,isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 ,max_number ,i ):
                is_prime[j] = False
    return [i for i in range(2 ,max_number ) if is_prime[i]]
def solution ( base : int = 8_0_0_8_0_0 ,degree : int = 8_0_0_8_0_0 ):
    """simple docstring"""
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
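# Worked check (illustrative): p**q * q**p <= base**degree is equivalent to
# q*log2(p) + p*log2(q) <= degree*log2(base), which the two-pointer sweep above
# counts over prime pairs p < q. For solution(base=800, degree=1) the bound is
# log2(800) ~ 9.644 and exactly two pairs qualify: (2, 3) -> 72 and (2, 5) -> 800.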
if __name__ == "__main__":
print(F'{solution() = }')
| 268 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[int] = ["PoolFormerFeatureExtractor"]
UpperCamelCase__ : Dict = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
UpperCamelCase__ : Any = "▁"
UpperCamelCase__ : Any = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/reformer-crime-and-punishment": 524_288,
}
class _a (PreTrainedTokenizer):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , A__ , A__="</s>" , A__="<unk>" , A__=[] , A__ = None , **A__ , ) -> None:
_SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A__ , unk_token=A__ , additional_special_tokens=A__ , sp_model_kwargs=self.sp_model_kwargs , **A__ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A__ )
    @property
    def vocab_size ( self ) -> Any:
        return self.sp_model.get_piece_size()
    def get_vocab ( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> int:
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , A__ ) -> None:
        self.__dict__ = A__
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize ( self , A__ ) -> List[str]:
        return self.sp_model.encode(A__ , out_type=str )
    def _convert_token_to_id ( self , A__ ) -> Union[str, Any]:
        return self.sp_model.piece_to_id(A__ )
    def _convert_id_to_token ( self , A__ ) -> str:
        if A__ < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(A__ )
        return token
    def convert_tokens_to_string ( self , A__ ) -> str:
        current_sub_tokens = []
        out_string = """"""
        for token in A__ :
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
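# (Minimal usage sketch, illustrative. The class above is the obfuscated copy of
# ReformerTokenizer; the checkpoint name comes from the pretrained map above.)
#     tok = _a.from_pretrained("google/reformer-crime-and-punishment")
#     ids = tok("Crime and Punishment")["input_ids"]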
| 0 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class a__ ( unittest.TestCase ):
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : str = tempfile.mkdtemp()
# fmt: off
__UpperCAmelCase : List[Any] = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_))))
__UpperCAmelCase : Dict = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
__UpperCAmelCase : str = {"unk_token": "<unk>"}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
__UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(UpperCamelCase_) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(UpperCamelCase_))
__UpperCAmelCase : str = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
__UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , UpperCamelCase_)
with open(self.image_processor_file , "w" , encoding="utf-8") as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : Any , **UpperCamelCase_ : Dict):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **UpperCamelCase_)
def a_ ( self : Union[str, Any] , **UpperCamelCase_ : List[Any]):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **UpperCamelCase_)
def a_ ( self : Tuple , **UpperCamelCase_ : Union[str, Any]):
"""simple docstring"""
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_)
def a_ ( self : Union[str, Any]):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def a_ ( self : str):
"""simple docstring"""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
return image_inputs
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : int = self.get_tokenizer()
__UpperCAmelCase : int = self.get_rust_tokenizer()
__UpperCAmelCase : int = self.get_image_processor()
__UpperCAmelCase : Tuple = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
processor_slow.save_pretrained(self.tmpdirname)
__UpperCAmelCase : Union[str, Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_)
__UpperCAmelCase : List[Any] = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
processor_fast.save_pretrained(self.tmpdirname)
__UpperCAmelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_)
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__UpperCAmelCase : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)")
__UpperCAmelCase : str = self.get_image_processor(do_normalize=UpperCamelCase_)
__UpperCAmelCase : Any = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCamelCase_)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCamelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCamelCase_)
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : int = self.get_image_processor()
__UpperCAmelCase : List[str] = self.get_tokenizer()
__UpperCAmelCase : Optional[int] = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : str = self.prepare_image_inputs()
__UpperCAmelCase : Union[str, Any] = image_processor(UpperCamelCase_ , return_tensors="np")
__UpperCAmelCase : List[str] = processor(images=UpperCamelCase_ , return_tensors="np")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = self.get_image_processor()
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : str = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : Tuple = "lower newer"
__UpperCAmelCase : List[Any] = processor(text=UpperCamelCase_ , return_tensors="np")
__UpperCAmelCase : Any = tokenizer(UpperCamelCase_ , return_tensors="np")
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist())
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : List[Any] = self.get_image_processor()
__UpperCAmelCase : int = self.get_tokenizer()
__UpperCAmelCase : Tuple = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = "lower newer"
__UpperCAmelCase : str = self.prepare_image_inputs()
__UpperCAmelCase : Any = processor(text=UpperCamelCase_ , images=UpperCamelCase_)
self.assertListEqual(list(inputs.keys()) , ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_):
processor()
def a_ ( self : str):
"""simple docstring"""
__UpperCAmelCase : Dict = "google/owlvit-base-patch32"
__UpperCAmelCase : str = OwlViTProcessor.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : int = ["cat", "nasa badge"]
__UpperCAmelCase : Dict = processor(text=UpperCamelCase_)
__UpperCAmelCase : List[Any] = 16
self.assertListEqual(list(inputs.keys()) , ["input_ids", "attention_mask"])
self.assertEqual(inputs["input_ids"].shape , (2, seq_length))
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_):
processor()
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = "google/owlvit-base-patch32"
__UpperCAmelCase : Tuple = OwlViTProcessor.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = [["cat", "nasa badge"], ["person"]]
__UpperCAmelCase : List[str] = processor(text=UpperCamelCase_)
__UpperCAmelCase : List[str] = 16
__UpperCAmelCase : str = len(UpperCamelCase_)
__UpperCAmelCase : List[str] = max([len(UpperCamelCase_) for texts in input_texts])
self.assertListEqual(list(inputs.keys()) , ["input_ids", "attention_mask"])
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length))
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_):
processor()
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : Tuple = "google/owlvit-base-patch32"
__UpperCAmelCase : Dict = OwlViTProcessor.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : Any = ["cat", "nasa badge"]
__UpperCAmelCase : Optional[int] = processor(text=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = 16
__UpperCAmelCase : Optional[Any] = inputs["input_ids"]
__UpperCAmelCase : Any = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys()) , ["input_ids", "attention_mask"])
self.assertEqual(inputs["input_ids"].shape , (2, seq_length))
self.assertListEqual(list(input_ids[0]) , predicted_ids[0])
self.assertListEqual(list(input_ids[1]) , predicted_ids[1])
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Any = self.get_image_processor()
__UpperCAmelCase : Any = self.get_tokenizer()
__UpperCAmelCase : Tuple = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : Any = self.prepare_image_inputs()
__UpperCAmelCase : List[Any] = self.prepare_image_inputs()
__UpperCAmelCase : Optional[Any] = processor(images=UpperCamelCase_ , query_images=UpperCamelCase_)
self.assertListEqual(list(inputs.keys()) , ["query_pixel_values", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_):
processor()
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.get_image_processor()
__UpperCAmelCase : str = self.get_tokenizer()
__UpperCAmelCase : str = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase : List[str] = processor.batch_decode(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(UpperCamelCase_)
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_)
| 77 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("""sample_data.csv""", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="""mean_squared_error""", optimizer="""adam""")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
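    # Shape check (illustrative addition): with look_back=10 and forward_days=5 each
    # sample is 10 consecutive scaled prices and its target is the next 5 prices.
    print("x_train:", x_train.shape)  # (n, 10, 1)
    print("y_train:", y_train.shape)  # (n, 5)
    print("pred:", pred.shape)  # (m, 5)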
| 77 | 1 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively ( hf_pointer , key , value , full_name , weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer , attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca ( fairseq_model , hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*" , layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer ( full_name , value , feature_extractor , unused_weights , use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb ( emb):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
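# (Illustrative note.) make_linear_from_emb above builds a bias-free nn.Linear whose
# .weight.data is the nn.Embedding's weight matrix, i.e. the usual weight-tying
# trick that lets a decoder reuse its input embeddings as the output projection.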
def create_vocab_dict ( dict_path):
    with open(dict_path , "r" , encoding="utf-8") as f:
        lines = f.readlines()
    words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint ( checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True)
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path , "vocab.json") , "w") as fp:
        json.dump(vocab_dict , fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
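    # Example invocation (sketch -- the script name and all paths are placeholders;
    # the flags are the ones defined above):
    #
    #   python convert_wav2vec2_seq2seq_checkpoint.py \
    #       --checkpoint_path /path/to/fairseq_checkpoint.pt \
    #       --dict_path /path/to/dict.txt \
    #       --pytorch_dump_folder_path ./converted_model \
    #       --vocab_size 10224 --num_decoder_layers 7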
| 193 |
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    def __init__(self, root: Node | None = None) -> None:
        self.root = root

    def __str__(self) -> str:
        return str(self.root)
    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)
    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def binary_search_tree_example() -> None:  # illustrative name; the function is never called in this module
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)
    # Prints all the elements of the list in order traversal
    print(t)
    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")
    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")
    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore
    for i in testlist:
        t.remove(i)
    print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
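# --- Illustrative sketch (not part of the original module) -------------------
# Exercises the helpers the demo above does not touch: find_kth_smallest and postorder.
def _demo_kth_smallest_and_postorder():
    tree = BinarySearchTree()
    tree.insert(8, 3, 10, 1, 6)  # insert accepts any number of values
    # In-order traversal of a BST is sorted, so the 2nd smallest value is 3.
    assert tree.find_kth_smallest(2, tree.root) == 3
    # postorder visits children before their parent, so the root comes last.
    assert [n.value for n in postorder(tree.root)] == [1, 6, 3, 10, 8]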
| 193 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs,
        )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
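# --- Illustrative sketch (not part of the original file) ---------------------
# Shows how the rope_scaling validation above behaves; the field names "type"
# and "factor" come directly from _rope_scaling_validation.
def _demo_rope_scaling():
    ok = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
    try:
        LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})  # factor must be > 1
    except ValueError as err:
        print(err)
    return ok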
| 279 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True)["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):  # class name reconstructed
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_alibi(self):  # test name reconstructed
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_cache_conversion(self):  # test name reconstructed
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))
    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):  # class name reconstructed
@slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )
        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]
        self.assertEqual(output_str, EXPECTED_OUTPUT)
@slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)
@slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
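# To run this module's tests, including the @slow integration tests (sketch --
# the file path is the conventional transformers location, not shown here):
#
#   RUN_SLOW=1 pytest tests/models/falcon/test_modeling_falcon.py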
| 414 | 0 |
"""simple docstring"""
def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
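# --- Illustrative sketch (not part of the original module) -------------------
# permute builds results recursively by rotating the list; permute2 swaps in
# place with backtracking. The orderings differ, but the permutation sets match.
def _demo_permutations():
    assert sorted(permute([1, 2])) == sorted(permute2([1, 2])) == [[1, 2], [2, 1]]
    assert len(permute2([1, 2, 3])) == 6  # 3! orderings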
| 717 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):  # class name reconstructed
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
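    # Example: run the __main__ pad_across_processes check above on 2 GPUs
    # (sketch -- assumes this file is saved as test_multigpu.py):
    #
    #   torchrun --nproc_per_node=2 test_multigpu.py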
| 141 | 0 |