| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (81-54k chars) | int64 (0-721) | string (91-41.9k chars) | int64 (0-699) | int64 (0-1) |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np", ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy")

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy")

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
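# Note on the dummy conditional UNet above: the x4 upscaler concatenates the
# 4-channel image latent with the 3-channel low-resolution RGB input, which is
# why the tiny test model is built with in_channels=7 and out_channels=4
# (an assumption based on the Stable Diffusion x4-upscaler architecture).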
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # NOTE: attribute name is an assumption; the obfuscated source only preserves the False value

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check whether every cell on the board has been visited."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Recursive backtracking helper for finding an open knight's tour."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # backtrack

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, trying every starting square."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
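# A minimal usage sketch (assumption: run as a script; n=5 is the smallest
# board size above 1 that admits an open knight's tour):
#
#   board = open_knight_tour(5)
#   for row in board:
#       print(row)
#
# Each cell of the returned board holds the 1-based step at which the knight
# visits that square.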
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) '
                f'are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
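# A minimal usage sketch (mirrors the documented behavior of the upstream
# `datasets` library; multiple translations per language are flattened and
# sorted by language code, and `zip` yields tuples):
#
#   feature = TranslationVariableLanguages(languages=["en", "fr"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#   # -> {"language": ("en", "fr", "fr"),
#   #     "translation": ("the cat", "le chat", "la chatte")}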
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            })
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom", )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        else:
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, dataset), )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            })
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="legacy", index_path=self.tmpdirname, )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer())
        return retriever
    # NOTE: test method names below are reconstructed from the upstream
    # transformers test suite; the obfuscated source did not preserve them.
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors="pt", )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6)  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True)  # check for doc token related keys in dictionary.
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        # count the digits of the newly computed Fibonacci number
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
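# Worked example: solution(3) == 12, since 144 = F(12) is the first Fibonacci
# number with three digits (1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144).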
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Heron's formula: area from the three side lengths."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests

    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
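# The `_LazyModule` indirection defers the torch-backed submodule imports until
# an attribute is first accessed. A minimal usage sketch (assuming the package
# layout above):
#
#   from transformers.models.git import GitConfig  # cheap: no torch import yet
#   from transformers.models.git import GitModel   # first access loads modeling_git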
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def _UpperCAmelCase ( __A : list , __A : list , __A : list , __A : list , __A : list ):
a_ : List[Any] = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__A )] )
a_ : int = np.array(__A )
a_ : str = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __A ) ) , x.transpose() ) , __A )
    # predicted value: beta0 + beta1 * test_date + beta2 * test_match
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )
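# A minimal cross-check sketch (hypothetical helper, not part of the pipeline above):
# the closed-form OLS solution beta = (X^T X)^{-1} X^T y used above should match
# numpy's least-squares solver on full-rank data.
def _ols_beta_sketch(x_mat, y_vec):
    return np.dot(np.dot(np.linalg.inv(np.dot(x_mat.T, x_mat)), x_mat.T), y_vec)

_x_demo = np.array([[1.0, 1.0, 2.0], [1.0, 2.0, 1.0], [1.0, 3.0, 3.0], [1.0, 4.0, 2.0]])
_y_demo = np.array([6.0, 5.0, 11.0, 10.0])
assert np.allclose(_ols_beta_sketch(_x_demo, _y_demo), np.linalg.lstsq(_x_demo, _y_demo, rcond=None)[0])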
def _UpperCAmelCase ( __A : list , __A : list , __A : list ):
a_ : str = (1, 2, 1)
a_ : int = (1, 1, 0, 7)
a_ : Tuple = SARIMAX(
__A , exog=__A , order=__A , seasonal_order=__A )
a_ : List[Any] = model.fit(disp=__A , maxiter=6_00 , method='''nm''' )
a_ : Union[str, Any] = model_fit.predict(1 , len(__A ) , exog=[test_match] )
return result[0]
def _UpperCAmelCase ( __A : list , __A : list , __A : list ):
a_ : str = SVR(kernel='''rbf''' , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(__A , __A )
a_ : Union[str, Any] = regressor.predict(__A )
return y_pred[0]
def _UpperCAmelCase ( __A : list ):
train_user.sort()
a_ : Optional[Any] = np.percentile(__A , 25 )
a_ : Optional[Any] = np.percentile(__A , 75 )
a_ : int = qa - qa
a_ : Optional[int] = qa - (iqr * 0.1)
return low_lim
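# Worked example of the cut-off above (assumed input, linear-interpolated percentiles):
# for [10, 20, 30, 40], q1 = 17.5 and q3 = 32.5, so iqr = 15.0 and the lower
# limit is 17.5 - 15.0 * 0.1 = 16.0.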
def _UpperCAmelCase ( __A : list , __A : float ):
a_ : Optional[int] = 0
a_ : List[str] = 0
for i in list_vote:
if i > actual_result:
a_ : Optional[int] = not_safe + 1
else:
if abs(abs(__A ) - abs(__A ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
__lowerCAmelCase = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
__lowerCAmelCase = pd.DataFrame(
data_input, columns=['total_user', 'total_even', 'days']
)
__lowerCAmelCase = Normalizer().fit_transform(data_input_df.values)
# split data
__lowerCAmelCase = normalize_df[:, 2].tolist()
__lowerCAmelCase = normalize_df[:, 0].tolist()
__lowerCAmelCase = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
__lowerCAmelCase = normalize_df[:, [1, 2]].tolist()
__lowerCAmelCase = x[: len(x) - 1]
__lowerCAmelCase = x[len(x) - 1 :]
# for linear regression & sarimax
__lowerCAmelCase = total_date[: len(total_date) - 1]
__lowerCAmelCase = total_user[: len(total_user) - 1]
__lowerCAmelCase = total_match[: len(total_match) - 1]
__lowerCAmelCase = total_date[len(total_date) - 1 :]
__lowerCAmelCase = total_user[len(total_user) - 1 :]
__lowerCAmelCase = total_match[len(total_match) - 1 :]
# voting system with forecasting
__lowerCAmelCase = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
__lowerCAmelCase = '' if data_safety_checker(res_vote, tst_user) else 'not '
    print(f'Today\'s data is {not_str}safe.')
| 666 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCAmelCase ( __A : List[str] , __A : List[Any] ):
a_ : Any = []
for part_id in partition_order:
a_ : str = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(__A ):
expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : Union[str, Any] = spark.range(1_00 ).repartition(1 )
a_ : Any = Spark(__A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
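    # Sketch of the arithmetic behind the assertion above: 100 rows * 8 bytes/row
    # = 800 bytes total, and 800 / 16 bytes per shard = 50 partitions.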
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : int = spark.range(10 ).repartition(2 )
a_ : Tuple = [1, 0]
a_ : List[str] = _generate_iterable_examples(__A , __A ) # Reverse the partitions.
a_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , __A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a_ , a_ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(10 ).repartition(1 )
a_ : Tuple = SparkExamplesIterable(__A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__A ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
a_ : Union[str, Any] = lambda __A : x.reverse()
a_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [2, 1, 0] )
a_ : str = SparkExamplesIterable(__A ).shuffle_data_sources(__A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[str] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a_ : Dict = SparkExamplesIterable(__A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [0, 2] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a_ : List[Any] = SparkExamplesIterable(__A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [1, 3] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[Any] = spark.range(1_00 ).repartition(1 )
a_ : Optional[Any] = Spark(__A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
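# A minimal sketch of the contiguous-stride worker sharding the two tests above
# expect (hypothetical helper, not the datasets API itself): worker `worker_id`
# out of `num_workers` takes every num_workers-th partition starting at worker_id.
def _assigned_partitions_sketch(num_partitions, worker_id, num_workers):
    return [p for p in range(num_partitions) if p % num_workers == worker_id]

assert _assigned_partitions_sketch(4, 0, 2) == [0, 2]
assert _assigned_partitions_sketch(4, 1, 2) == [1, 3]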
| 666 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def _UpperCAmelCase ( __A : str ):
a_ : Union[str, Any] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
a_ : Dict = 1_92
a_ : int = 7_68
a_ : Dict = 12
a_ : List[str] = 3
a_ : Union[str, Any] = [8_00, 13_33]
a_ : Optional[int] = False
elif yolos_name == "yolos_s_dWr":
a_ : Tuple = 3_30
a_ : List[str] = 14
a_ : Any = 6
a_ : List[str] = 13_20
elif "yolos_s" in yolos_name:
a_ : int = 3_84
a_ : Any = 15_36
a_ : int = 12
a_ : List[Any] = 6
elif "yolos_b" in yolos_name:
a_ : Any = [8_00, 13_44]
a_ : List[str] = 91
a_ : List[str] = '''huggingface/label-files'''
a_ : str = '''coco-detection-id2label.json'''
a_ : Optional[Any] = json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) )
a_ : Any = {int(__A ): v for k, v in idalabel.items()}
a_ : Dict = idalabel
a_ : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def _UpperCAmelCase ( __A : dict , __A : YolosConfig , __A : bool = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a_ : Optional[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
a_ : Optional[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
a_ : List[Any] = in_proj_weight[: config.hidden_size, :]
a_ : List[Any] = in_proj_bias[: config.hidden_size]
a_ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a_ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a_ : List[str] = in_proj_weight[-config.hidden_size :, :]
a_ : List[Any] = in_proj_bias[-config.hidden_size :]
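# A small numeric sketch of the slicing above (toy sizes, hypothetical names): a
# fused qkv weight of shape (3 * hidden_size, hidden_size) splits into equal
# thirds for query, key and value, and concatenating them restores the original.
_h_demo = 4
_fused_demo = torch.arange(3 * _h_demo * _h_demo, dtype=torch.float32).reshape(3 * _h_demo, _h_demo)
_q_demo = _fused_demo[:_h_demo, :]
_k_demo = _fused_demo[_h_demo : _h_demo * 2, :]
_v_demo = _fused_demo[-_h_demo:, :]
assert torch.equal(torch.cat([_q_demo, _k_demo, _v_demo], dim=0), _fused_demo)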
def _UpperCAmelCase ( __A : str ):
if "backbone" in name:
a_ : str = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
a_ : Dict = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
a_ : List[Any] = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
a_ : Union[str, Any] = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
a_ : str = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
a_ : Tuple = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
a_ : Union[str, Any] = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
a_ : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
a_ : Any = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
a_ : str = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
a_ : List[Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
a_ : Any = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
a_ : Union[str, Any] = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
a_ : Dict = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
a_ : Any = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
a_ : Tuple = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def _UpperCAmelCase ( __A : dict , __A : YolosForObjectDetection ):
for key in orig_state_dict.copy().keys():
a_ : Tuple = orig_state_dict.pop(__A )
if "qkv" in key:
a_ : Optional[int] = key.split('''.''' )
a_ : List[str] = int(key_split[2] )
a_ : List[Any] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
a_ : Optional[Any] = val[:dim, :]
a_ : Optional[int] = val[
dim : dim * 2, :
]
a_ : List[Any] = val[-dim:, :]
else:
a_ : Optional[Any] = val[:dim]
a_ : str = val[dim : dim * 2]
a_ : List[Any] = val[-dim:]
else:
a_ : List[Any] = val
return orig_state_dict
def _UpperCAmelCase ( ):
a_ : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a_ : Tuple = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def _UpperCAmelCase ( __A : str , __A : str , __A : str , __A : bool = False ):
a_ : str = get_yolos_config(__A )
# load original state_dict
a_ : Tuple = torch.load(__A , map_location='''cpu''' )['''model''']
# load 🤗 model
a_ : Optional[int] = YolosForObjectDetection(__A )
model.eval()
a_ : int = convert_state_dict(__A , __A )
model.load_state_dict(__A )
# Check outputs on an image, prepared by YolosImageProcessor
a_ : Dict = 8_00 if yolos_name != '''yolos_ti''' else 5_12
a_ : List[str] = YolosImageProcessor(format='''coco_detection''' , size=__A )
a_ : List[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
a_ : Dict = model(**__A )
a_ , a_ : Union[str, Any] = outputs.logits, outputs.pred_boxes
a_ , a_ : List[Any] = None, None
if yolos_name == "yolos_ti":
a_ : Union[str, Any] = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
a_ : List[str] = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
a_ : Union[str, Any] = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
a_ : Optional[int] = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
a_ : Union[str, Any] = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
a_ : Optional[int] = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
a_ : Any = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
a_ : Any = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
a_ : int = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
a_ : Tuple = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , __A , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __A , atol=1E-4 )
Path(__A ).mkdir(exist_ok=__A )
print(f'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__A )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__A )
if push_to_hub:
a_ : List[Any] = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
a_ : Tuple = model_mapping[yolos_name]
image_processor.push_to_hub(__A , organization='''hustvl''' )
model.push_to_hub(__A , organization='''hustvl''' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 666 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bloom"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=25_0880 , __SCREAMING_SNAKE_CASE : Dict=64 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : int=8 , __SCREAMING_SNAKE_CASE : Any=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : List[str]=False , **__SCREAMING_SNAKE_CASE : str , ) -> Any:
a_ : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
a_ : Any = kwargs.pop('''n_embed''' , __SCREAMING_SNAKE_CASE )
a_ : Optional[int] = hidden_size if n_embed is None else n_embed
a_ : int = n_layer
a_ : str = n_head
a_ : Optional[int] = layer_norm_epsilon
a_ : Dict = initializer_range
a_ : List[str] = use_cache
a_ : Dict = pretraining_tp
a_ : Optional[Any] = apply_residual_connection_post_layernorm
a_ : Optional[Any] = hidden_dropout
a_ : List[str] = attention_dropout
a_ : Dict = bos_token_id
a_ : Optional[int] = eos_token_id
a_ : Any = slow_but_exact
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = version.parse("1.12" )
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" , __SCREAMING_SNAKE_CASE : List[PatchingSpec] = None , __SCREAMING_SNAKE_CASE : bool = False , ) -> Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config , '''pad_token_id''' , __SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
a_ : Tuple = 0
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
a_ : Optional[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' , inverted_values_shape=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
a_ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self._config.n_head
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizer" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
a_ : Dict = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
        # We need to order the inputs in the way they appear in the forward()
a_ : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a_ , a_ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
a_ : str = seqlen + 2
a_ : Any = self._config.hidden_size // self.num_attention_heads
a_ : Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
a_ : Any = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
a_ : List[str] = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
a_ : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
a_ : Optional[int] = ordered_inputs['''attention_mask'''].dtype
a_ : List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 13
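# A shape sketch with assumed toy numbers for the BLOOM-style past_key_values
# built in generate_dummy_inputs above: keys use (batch * n_head, head_dim, past_len)
# and values use (batch * n_head, past_len, head_dim).
#
#   batch, n_head, head_dim, past_len = 2, 8, 16, 5
#   key = torch.zeros(batch * n_head, head_dim, past_len)     # torch.Size([16, 16, 5])
#   value = torch.zeros(batch * n_head, past_len, head_dim)   # torch.Size([16, 5, 16])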
| 666 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__lowerCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = ["pixel_values"]
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Dict[str, int]] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , **__SCREAMING_SNAKE_CASE : Dict , ) -> None:
super().__init__(**__SCREAMING_SNAKE_CASE )
a_ : Tuple = size if size is not None else {'''shortest_edge''': 256}
a_ : str = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
a_ : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
a_ : str = get_size_dict(__SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
a_ : Tuple = do_resize
a_ : str = size
a_ : Any = resample
a_ : Optional[Any] = do_center_crop
a_ : Union[str, Any] = crop_size
a_ : List[Any] = do_rescale
a_ : Dict = rescale_factor
a_ : Tuple = do_normalize
a_ : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a_ : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : int , ) -> np.ndarray:
a_ : List[Any] = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
a_ : str = get_resize_output_image_size(__SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=__SCREAMING_SNAKE_CASE )
return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> np.ndarray:
a_ : Dict = get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(__SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : str ) -> np.ndarray:
return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Tuple , ) -> np.ndarray:
return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Tuple , __SCREAMING_SNAKE_CASE : ImageInput , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[float] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE : List[str] , ) -> Union[str, Any]:
a_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
a_ : Tuple = size if size is not None else self.size
a_ : List[Any] = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
a_ : str = resample if resample is not None else self.resample
a_ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
a_ : Optional[int] = crop_size if crop_size is not None else self.crop_size
a_ : List[str] = get_size_dict(__SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
a_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
a_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
a_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
a_ : str = image_mean if image_mean is not None else self.image_mean
a_ : Union[str, Any] = image_std if image_std is not None else self.image_std
a_ : Dict = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
a_ : Union[str, Any] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
a_ : List[str] = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
a_ : List[Any] = [self.center_crop(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
a_ : List[Any] = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
a_ : Dict = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images]
a_ : Optional[Any] = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
a_ : Optional[Any] = {'''pixel_values''': images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Tuple] = None ) -> Dict:
a_ : List[str] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(__SCREAMING_SNAKE_CASE ):
a_ : Tuple = target_sizes.numpy()
a_ : str = []
for idx in range(len(__SCREAMING_SNAKE_CASE ) ):
a_ : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE )
a_ : List[str] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__SCREAMING_SNAKE_CASE )
else:
a_ : List[str] = logits.argmax(dim=1 )
a_ : Union[str, Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 666 |
'''simple docstring'''
import sys
__lowerCAmelCase = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _UpperCAmelCase ( __A : str ):
a_ : Tuple = 1
for digit in s:
product *= int(__A )
return product
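# A brute-force cross-check sketch (not part of the original solution; reuses the
# digit-product helper above under its call-site name str_eval): scan every
# 13-digit window directly, which is slower but obviously correct.
def _solution_bruteforce_sketch(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))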
def _UpperCAmelCase ( __A : str = N ):
a_ : Dict = -sys.maxsize - 1
a_ : Optional[int] = n[:13]
a_ : str = 13
while cur_index < len(__A ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
a_ : Tuple = substr[1:] + n[cur_index]
cur_index += 1
else:
a_ : Dict = max(__A , str_eval(__A ) )
a_ : List[str] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 666 | 1 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def _UpperCAmelCase ( __A : List[Any] ):
a_ : Optional[Any] = tmp_path / '''file.csv'''
a_ : List[Any] = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(__A , '''w''' ) as f:
f.write(__A )
return str(__A )
@pytest.fixture
def _UpperCAmelCase ( __A : int ):
a_ : int = tmp_path / '''malformed_file.csv'''
a_ : Union[str, Any] = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(__A , '''w''' ) as f:
f.write(__A )
return str(__A )
@pytest.fixture
def _UpperCAmelCase ( __A : Optional[Any] , __A : Dict ):
a_ : List[str] = tmp_path / '''csv_with_image.csv'''
a_ : Dict = textwrap.dedent(
f'\\n image\n {image_file}\n ' )
with open(__A , '''w''' ) as f:
f.write(__A )
return str(__A )
@pytest.fixture
def _UpperCAmelCase ( __A : Optional[Any] ):
a_ : Dict = tmp_path / '''csv_with_label.csv'''
a_ : Dict = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(__A , '''w''' ) as f:
f.write(__A )
return str(__A )
@pytest.fixture
def _UpperCAmelCase ( __A : int ):
a_ : List[str] = tmp_path / '''csv_with_int_list.csv'''
a_ : str = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(__A , '''w''' ) as f:
f.write(__A )
return str(__A )
def _UpperCAmelCase ( __A : Union[str, Any] , __A : Optional[int] , __A : List[Any] ):
a_ : Optional[int] = Csv()
a_ : Any = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(__A , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(__A ) in record.message
for record in caplog.records )
@require_pil
def _UpperCAmelCase ( __A : str ):
with open(__A , encoding='''utf-8''' ) as f:
a_ : Tuple = f.read().splitlines()[1]
a_ : Union[str, Any] = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
a_ : Optional[int] = csv._generate_tables([[csv_file_with_image]] )
a_ : Union[str, Any] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
a_ : str = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def _UpperCAmelCase ( __A : List[str] ):
with open(__A , encoding='''utf-8''' ) as f:
a_ : int = f.read().splitlines()[1:]
a_ : str = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
a_ : Optional[Any] = csv._generate_tables([[csv_file_with_label]] )
a_ : str = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
a_ : Dict = pa_table.to_pydict()['''label''']
assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).straint(__A ) for label in labels]
def _UpperCAmelCase ( __A : str ):
a_ : Optional[int] = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda __A : [int(__A ) for i in x.split()]} )
a_ : Union[str, Any] = csv._generate_tables([[csv_file_with_int_list]] )
a_ : Dict = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
a_ : List[Any] = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 666 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : list[int] ):
a_ : int = len(__A ) // 2
# choose the middle 3 elements
a_ : Dict = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
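# A self-contained iterative sketch of the same divide-and-conquer idea
# (hypothetical name; assumes the input is unimodal, i.e. strictly increasing
# and then strictly decreasing):
def _peak_sketch(lst: list) -> int:
    lo, hi = 0, len(lst) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if lst[mid] < lst[mid + 1]:
            lo = mid + 1  # the slope still rises, so the peak lies to the right
        else:
            hi = mid  # lst[mid] >= lst[mid + 1]: the peak is at mid or to the left
    return lst[lo]

assert _peak_sketch([1, 3, 7, 5, 2]) == 7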
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 | 1 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict=0.2 , __SCREAMING_SNAKE_CASE : str=0.2 ) -> Any:
a_ : int = bp_numa
a_ : Dict = bp_numa
a_ : Union[str, Any] = bp_numa
a_ : List[Any] = conva_get[:2]
a_ : Optional[Any] = conva_get[2]
a_ : List[str] = size_pa
a_ : Optional[int] = rate_w
a_ : List[Any] = rate_t
a_ : int = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
a_ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
a_ : Optional[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
a_ : Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1
a_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1
a_ : Optional[Any] = -2 * np.random.rand(self.num_bpa ) + 1
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
# save model dict with pickle
a_ : Optional[int] = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as f:
pickle.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print(f'Model saved: {save_path}' )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
# read saved model
with open(__SCREAMING_SNAKE_CASE , '''rb''' ) as f:
a_ : Any = pickle.load(__SCREAMING_SNAKE_CASE ) # noqa: S301
a_ : Any = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
a_ : Union[str, Any] = model_dic.get('''size_pooling1''' )
a_ : Any = model_dic.get('''num_bp1''' )
a_ : List[Any] = model_dic.get('''num_bp2''' )
a_ : List[str] = model_dic.get('''num_bp3''' )
a_ : List[Any] = model_dic.get('''rate_weight''' )
a_ : List[str] = model_dic.get('''rate_thre''' )
# create model instance
a_ : Tuple = CNN(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# modify model parameter
a_ : List[Any] = model_dic.get('''w_conv1''' )
a_ : Optional[Any] = model_dic.get('''wkj''' )
a_ : int = model_dic.get('''vji''' )
a_ : str = model_dic.get('''thre_conv1''' )
a_ : List[Any] = model_dic.get('''thre_bp2''' )
a_ : Any = model_dic.get('''thre_bp3''' )
return conv_ins
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] ) -> str:
return 1 / (1 + np.exp(-1 * x ))
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
return round(__SCREAMING_SNAKE_CASE , 3 )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
# convolution process
a_ : Dict = convs[0]
a_ : List[Any] = convs[1]
a_ : Dict = np.shape(__SCREAMING_SNAKE_CASE )[0]
# get the data slice of original image data, data_focus
a_ : List[str] = []
for i_focus in range(0 , size_data - size_conv + 1 , __SCREAMING_SNAKE_CASE ):
for j_focus in range(0 , size_data - size_conv + 1 , __SCREAMING_SNAKE_CASE ):
a_ : Dict = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__SCREAMING_SNAKE_CASE )
        # calculate the feature map of every kernel and save it as a list of matrices
a_ : Union[str, Any] = []
a_ : str = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__SCREAMING_SNAKE_CASE ):
a_ : Union[str, Any] = []
for i_focus in range(len(__SCREAMING_SNAKE_CASE ) ):
a_ : Any = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__SCREAMING_SNAKE_CASE ) )
a_ : str = np.asmatrix(__SCREAMING_SNAKE_CASE ).reshape(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
data_featuremap.append(__SCREAMING_SNAKE_CASE )
        # expand the data slice to one dimension
a_ : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__SCREAMING_SNAKE_CASE ) )
a_ : int = np.asarray(__SCREAMING_SNAKE_CASE )
return focus_list, data_featuremap
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str="average_pool" ) -> Any:
# pooling process
a_ : Any = len(featuremaps[0] )
a_ : Union[str, Any] = int(size_map / size_pooling )
a_ : str = []
for i_map in range(len(__SCREAMING_SNAKE_CASE ) ):
a_ : Union[str, Any] = featuremaps[i_map]
a_ : Optional[Any] = []
for i_focus in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for j_focus in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a_ : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__SCREAMING_SNAKE_CASE ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__SCREAMING_SNAKE_CASE ) )
a_ : Optional[Any] = np.asmatrix(__SCREAMING_SNAKE_CASE ).reshape(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
featuremap_pooled.append(__SCREAMING_SNAKE_CASE )
return featuremap_pooled
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
        # expand three-dimensional data into a one-dimensional list
a_ : Optional[Any] = []
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
a_ : Optional[int] = np.shape(data[i] )
a_ : List[str] = data[i].reshape(1 , shapes[0] * shapes[1] )
a_ : List[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(__SCREAMING_SNAKE_CASE )
a_ : int = np.asarray(__SCREAMING_SNAKE_CASE )
return data_expanded
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
        # expand a matrix into a one-dimensional list
a_ : str = np.asarray(__SCREAMING_SNAKE_CASE )
a_ : List[str] = np.shape(__SCREAMING_SNAKE_CASE )
a_ : Tuple = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> str:
a_ : Tuple = []
a_ : Dict = 0
for i_map in range(__SCREAMING_SNAKE_CASE ):
a_ : Optional[Any] = np.ones((size_map, size_map) )
for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for j in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a_ : Optional[Any] = pd_pool[
i_pool
]
a_ : Dict = i_pool + 1
a_ : Tuple = np.multiply(
__SCREAMING_SNAKE_CASE , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(__SCREAMING_SNAKE_CASE )
return pd_all
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=bool ) -> int:
        # model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(__SCREAMING_SNAKE_CASE )) )
print((''' - - Shape: Teach_Data ''', np.shape(__SCREAMING_SNAKE_CASE )) )
a_ : int = 0
a_ : Any = []
a_ : Any = 1_0000
while rp < n_repeat and mse >= error_accuracy:
a_ : Optional[Any] = 0
print(f'-------------Learning Time {rp}--------------' )
for p in range(len(__SCREAMING_SNAKE_CASE ) ):
# print('------------Learning Image: %d--------------'%p)
a_ : List[Any] = np.asmatrix(datas_train[p] )
a_ : str = np.asarray(datas_teach[p] )
a_ , a_ : Optional[Any] = self.convolute(
__SCREAMING_SNAKE_CASE , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a_ : str = self.pooling(__SCREAMING_SNAKE_CASE , self.size_poolinga )
a_ : Optional[int] = np.shape(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = self._expand(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = data_bp_input
a_ : Optional[Any] = np.dot(__SCREAMING_SNAKE_CASE , self.vji.T ) - self.thre_bpa
a_ : Any = self.sig(__SCREAMING_SNAKE_CASE )
a_ : Tuple = np.dot(__SCREAMING_SNAKE_CASE , self.wkj.T ) - self.thre_bpa
a_ : List[str] = self.sig(__SCREAMING_SNAKE_CASE )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
a_ : Any = np.multiply(
(data_teach - bp_outa) , np.multiply(__SCREAMING_SNAKE_CASE , (1 - bp_outa) ) )
a_ : List[str] = np.multiply(
np.dot(__SCREAMING_SNAKE_CASE , self.wkj ) , np.multiply(__SCREAMING_SNAKE_CASE , (1 - bp_outa) ) )
a_ : List[str] = np.dot(__SCREAMING_SNAKE_CASE , self.vji )
a_ : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
a_ : str = pd_conva_pooled.T.getA().tolist()
a_ : List[Any] = self._calculate_gradient_from_pool(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
a_ : int = self._expand_mat(pd_conva_all[k_conv] )
a_ : Tuple = self.rate_weight * np.dot(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : str = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
a_ : int = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
                # fully connected layer
a_ : Optional[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
a_ : Union[str, Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
a_ : Dict = self.thre_bpa - pd_k_all * self.rate_thre
a_ : Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the total error over all training images
a_ : Optional[int] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
a_ : Union[str, Any] = rp + 1
a_ : str = error_count / patterns
all_mse.append(__SCREAMING_SNAKE_CASE )
def draw_error():
a_ : int = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__SCREAMING_SNAKE_CASE , '''+-''' )
plt.plot(__SCREAMING_SNAKE_CASE , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(__SCREAMING_SNAKE_CASE , alpha=0.5 )
plt.show()
        print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, f' - - Mse: {mse:.6f}') )
if draw_e:
draw_error()
return mse
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Dict:
        # model prediction
a_ : List[Any] = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(__SCREAMING_SNAKE_CASE )) )
for p in range(len(__SCREAMING_SNAKE_CASE ) ):
a_ : Tuple = np.asmatrix(datas_test[p] )
a_ , a_ : Optional[int] = self.convolute(
__SCREAMING_SNAKE_CASE , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a_ : Union[str, Any] = self.pooling(__SCREAMING_SNAKE_CASE , self.size_poolinga )
a_ : str = self._expand(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = data_bp_input
a_ : Any = bp_outa * self.vji.T - self.thre_bpa
a_ : Optional[int] = self.sig(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = bp_outa * self.wkj.T - self.thre_bpa
a_ : List[Any] = self.sig(__SCREAMING_SNAKE_CASE )
produce_out.extend(bp_outa.getA().tolist() )
a_ : Optional[int] = [list(map(self.do_round , __SCREAMING_SNAKE_CASE ) ) for each in produce_out]
return np.asarray(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple:
        # return the image data after the convolution process so it can be inspected
a_ : str = np.asmatrix(__SCREAMING_SNAKE_CASE )
a_ , a_ : Optional[Any] = self.convolute(
__SCREAMING_SNAKE_CASE , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a_ : int = self.pooling(__SCREAMING_SNAKE_CASE , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
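# A compact average-pooling sketch, equivalent in spirit to the "average_pool"
# branch of CNN.pooling above (hypothetical helper; assumes a square map whose
# side length is divisible by the pooling size k):
def _average_pool_sketch(feature_map: np.ndarray, k: int) -> np.ndarray:
    n = feature_map.shape[0]
    return feature_map.reshape(n // k, k, n // k, k).mean(axis=(1, 3))

assert _average_pool_sketch(np.arange(16.0).reshape(4, 4), 2).shape == (2, 2)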
| 666 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = LongformerTokenizer
snake_case__ = True
snake_case__ = LongformerTokenizerFast
snake_case__ = True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Any = {'''unk_token''': '''<unk>'''}
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Any , **__SCREAMING_SNAKE_CASE : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
a_ : Union[str, Any] = '''lower newer'''
a_ : List[Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a_ : List[str] = '''lower newer'''
a_ : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a_ : Optional[int] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokens + [tokenizer.unk_token]
a_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Dict = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
a_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
a_ : str = self.get_tokenizer()
a_ : int = '''Encode this sequence.'''
a_ : List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
a_ : Optional[Any] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = '''Encode <mask> sequence'''
a_ : List[str] = '''Encode <mask>sequence'''
a_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
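    # Note (sketch): byte-level BPE folds a leading space into the following token
    # (rendered as the 'Ġ' prefix), which is why the first sub-token differs
    # depending on add_prefix_space in the assertions above.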
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : str = '''A, <mask> AllenNLP sentence.'''
a_ : List[Any] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
a_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
a_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a_ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Dict = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
a_ : Union[str, Any] = f'{text_of_1_token} {text_of_1_token}'
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Union[str, Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a_ : str = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
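# --- Added illustration (hedged): not part of the original test file ---
# A standalone sketch of the behaviour asserted above: with a fast BPE
# tokenizer, `add_prefix_space` and `trim_offsets` decide whether the leading
# space of a word is counted in `offset_mapping`. The checkpoint name below is
# an assumption for demonstration purposes only.
from transformers import AutoTokenizer
_tok = AutoTokenizer.from_pretrained('''roberta-base''' , use_fast=True , add_prefix_space=True )
_enc = _tok('''hello hello''' , return_offsets_mapping=True , add_special_tokens=False )
# With the default trim_offsets=True the offsets skip the injected space:
# expected roughly [(0, 5), (6, 11)]
print(_enc['''offset_mapping'''] )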
| 666 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
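# --- Added sketch (hedged): how a lazy module defers imports ---
# `_LazyModule` above postpones the heavy torch imports until a symbol is
# first accessed. A minimal stand-in built on importlib (a simplified
# assumption, not the actual transformers implementation) looks like this:
import importlib
import types
class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported symbol to the submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }
    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(f'module {self.__name__} has no attribute {name}')
        submodule = importlib.import_module('''.''' + self._symbol_to_module[name], self.__name__)
        return getattr(submodule, name)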
| 666 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__lowerCAmelCase = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
__lowerCAmelCase = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
__lowerCAmelCase = '▁'
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict="<s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="<s>" , __SCREAMING_SNAKE_CASE : Dict="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Tuple="<mask>" , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
a_ : Tuple = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
a_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
a_ : Tuple = vocab_file
a_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
a_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
a_ : Any = len(self.sp_model ) - 1
a_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ : List[str] = [self.cls_token_id]
a_ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
a_ : List[str] = [self.sep_token_id]
a_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : int = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a_ : Optional[int] = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
a_ : Dict = []
a_ : List[Any] = ''''''
a_ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
a_ : Dict = True
a_ : Optional[Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
a_ : Tuple = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def __getstate__( self : Dict ) -> int:
a_ : Dict = self.__dict__.copy()
a_ : List[str] = None
return state
def __setstate__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
a_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a_ : Union[str, Any] = {}
a_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
a_ : Union[str, Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
a_ : Any = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
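# --- Added usage sketch (hedged) ---
# The pair layout produced by `build_inputs_with_special_tokens` above is the
# RoBERTa-style `<s> A </s></s> B </s>`. A quick check, assuming the
# `moussaKam/barthez` checkpoint listed above is reachable:
from transformers import BarthezTokenizer
_tok = BarthezTokenizer.from_pretrained('''moussaKam/barthez''' )
_ids = _tok.build_inputs_with_special_tokens([10, 11] , [12] )
assert _ids == [_tok.cls_token_id, 10, 11, _tok.sep_token_id, _tok.sep_token_id, 12, _tok.sep_token_id]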
| 666 | 1 |
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError('''find_max() arg is an empty sequence''')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('''list index out of range''')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range [left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range [mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
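# --- Added usage sketch (hedged): hypothetical inputs ---
# The recursion halves [left, right] until one element remains and then
# merges the two sub-range maxima on the way back up.
assert find_max([3, 1, 4, 1, 5, 9, 2, 6], 0, 7) == 9
assert find_max([-2, -7, -1], 0, 2) == -1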
| 666 |
'''simple docstring'''
def jaro_winkler(stra: str, strb: str) -> float:
    def get_matched_characters(_stra: str, _strb: str) -> str:
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched twice
                _strb = f'{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}'
        return "".join(matched)
    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
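# --- Added usage sketch (hedged) ---
# Textbook pair: "martha" vs "marhta" has 6 matching characters, 1
# transposition and a shared 3-character prefix, giving a Jaro score of
# ~0.9444 and a Jaro-Winkler score of ~0.9611.
assert abs(jaro_winkler('''martha''', '''marhta''') - 0.9611) < 1e-3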
| 666 | 1 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__lowerCAmelCase = 'bart'
__lowerCAmelCase = True
@st.cache(allow_output_mutation=True )
def _UpperCAmelCase ( ):
if LOAD_DENSE_INDEX:
a_ : Tuple = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
a_ : Tuple = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
a_ : List[str] = qar_model.eval()
else:
a_ , a_ : str = (None, None)
if MODEL_TYPE == "bart":
a_ : List[Any] = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
a_ : int = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
a_ : List[Any] = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
a_ : str = sas_model.eval()
else:
a_ , a_ : Optional[int] = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def _UpperCAmelCase ( ):
if LOAD_DENSE_INDEX:
a_ : Optional[int] = faiss.StandardGpuResources()
a_ : List[str] = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
a_ : Optional[int] = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 1_28) , )
a_ : Optional[int] = faiss.IndexFlatIP(1_28 )
a_ : Dict = faiss.index_cpu_to_gpu(__A , 1 , __A )
wikiaab_gpu_index_flat.add(__A ) # TODO fix for larger GPU
else:
a_ , a_ : List[Any] = (None, None)
a_ : Union[str, Any] = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def _UpperCAmelCase ( ):
a_ : List[str] = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
a_ : str = elia['''train_eli5''']
a_ : int = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 1_28) )
a_ : Union[str, Any] = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(__A )
return (elia_train, eli5_train_q_index)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = load_indexes()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = load_models()
__lowerCAmelCase , __lowerCAmelCase = load_train_data()
def _UpperCAmelCase ( __A : Tuple , __A : List[str]=10 ):
a_ : List[Any] = embed_questions_for_retrieval([question] , __A , __A )
a_ , a_ : Dict = eli5_train_q_index.search(__A , __A )
a_ : Tuple = [elia_train[int(__A )] for i in I[0]]
return nn_examples
def _UpperCAmelCase ( __A : Union[str, Any] , __A : Optional[Any]="wiki40b" , __A : List[Any]="dense" , __A : List[Any]=10 ):
if source == "none":
a_ , a_ : List[Any] = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
a_ , a_ : List[Any] = query_qa_dense_index(
__A , __A , __A , __A , __A , __A )
else:
a_ , a_ : Optional[Any] = query_es_index(
__A , __A , index_name='''english_wiki40b_snippets_100w''' , n_results=__A , )
a_ : int = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
a_ : str = '''question: {} context: {}'''.format(__A , __A )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __A : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __A : None),
} )
def _UpperCAmelCase ( __A : Optional[int] , __A : Union[str, Any] , __A : Union[str, Any] , __A : List[str]=64 , __A : Optional[Any]=2_56 , __A : int=False , __A : Optional[Any]=2 , __A : int=0.95 , __A : Optional[Any]=0.8 ):
with torch.no_grad():
a_ : Optional[Any] = qa_sas_generate(
__A , __A , __A , num_answers=1 , num_beams=__A , min_len=__A , max_len=__A , do_sample=__A , temp=__A , top_p=__A , top_k=__A , max_input_length=10_24 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
__lowerCAmelCase = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
__lowerCAmelCase = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowerCAmelCase = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
__lowerCAmelCase = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
__lowerCAmelCase = st.sidebar.checkbox('Demo options')
if demo_options:
__lowerCAmelCase = st.sidebar.selectbox(
'',
action_list,
index=3,
)
__lowerCAmelCase = action_list.index(action_st)
__lowerCAmelCase = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
__lowerCAmelCase = show_type == 'Show full text of passages'
else:
__lowerCAmelCase = 3
__lowerCAmelCase = True
__lowerCAmelCase = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
__lowerCAmelCase = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
__lowerCAmelCase = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
__lowerCAmelCase = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
__lowerCAmelCase = 'wiki40b'
__lowerCAmelCase = 'dense'
__lowerCAmelCase = 'beam'
__lowerCAmelCase = 2
__lowerCAmelCase = 64
__lowerCAmelCase = 256
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = st.sidebar.checkbox('Generation options')
if generate_options:
__lowerCAmelCase = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
__lowerCAmelCase = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
__lowerCAmelCase = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__lowerCAmelCase = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__lowerCAmelCase = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__lowerCAmelCase = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__lowerCAmelCase = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__lowerCAmelCase = None
# start main text
__lowerCAmelCase = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
__lowerCAmelCase = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__lowerCAmelCase = st.text_input('Enter your question here:', '')
else:
__lowerCAmelCase = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
__lowerCAmelCase , __lowerCAmelCase = make_support(question, source=wiki_source, method='dense', n_results=10)
__lowerCAmelCase , __lowerCAmelCase = make_support(question, source=wiki_source, method='sparse', n_results=10)
__lowerCAmelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__lowerCAmelCase = support_list[:10]
__lowerCAmelCase = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
__lowerCAmelCase , __lowerCAmelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__lowerCAmelCase , __lowerCAmelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
__lowerCAmelCase = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
__lowerCAmelCase = res[1].strip()
if sec_titles == "":
__lowerCAmelCase = '[{}]({})'.format(res[0], wiki_url)
else:
__lowerCAmelCase = sec_titles.split(' & ')
__lowerCAmelCase = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
__lowerCAmelCase = find_nearest_training(question)
__lowerCAmelCase = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
__lowerCAmelCase = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
__lowerCAmelCase = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
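# --- Added sketch (hedged): the dense-retrieval core used above ---
# Question embeddings are matched against passage embeddings with a flat
# max-inner-product FAISS index. Sizes and vectors below are hypothetical
# stand-ins, not the app's real data.
import faiss
import numpy as np
_d = 128  # embedding dimension, matching the memmaps above
_passages = np.random.rand(1_000, _d).astype('''float32''' )
_index = faiss.IndexFlatIP(_d )  # exact inner-product search
_index.add(_passages )
_query = np.random.rand(1, _d ).astype('''float32''' )
_scores, _ids = _index.search(_query , 10 )  # top-10 passage ids for the query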
| 666 |
'''simple docstring'''
import torch
from transformers import AutoModel
class SCREAMING_SNAKE_CASE ( torch.nn.Module ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int="sayef/fsner-bert-base-uncased" ) -> str:
super(__SCREAMING_SNAKE_CASE , self ).__init__()
a_ : str = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
a_ : Dict = torch.nn.Softmax(dim=1 )
def SCREAMING_SNAKE_CASE ( self : str , **__SCREAMING_SNAKE_CASE : int ) -> str:
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=1 ) -> Dict:
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
a_ : Dict = W_supports['''sizes'''].tolist()
a_ : Tuple = W_supports['''start_token_id'''].item()
a_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
a_ : int = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Any = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = None
a_ : Tuple = None
a_ : List[str] = W_supports['''input_ids'''] == start_token_id
a_ : Dict = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
a_ : str = 0
else:
a_ : str = support_sizes[i - 1]
a_ : Union[str, Any] = S[s : s + size][start_token_masks[s : s + size]]
a_ : Tuple = S[s : s + size][end_token_masks[s : s + size]]
a_ : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
a_ : Optional[Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
a_ : Any = torch.vstack((p_starts, p_start) )
a_ : Dict = torch.vstack((p_ends, p_end) )
else:
a_ : Optional[int] = p_start
a_ : List[Any] = p_end
return p_starts, p_ends
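# --- Added sketch (hedged): the span-scoring step in `forward` above ---
# Each query token is scored against the support set's start/end token
# embeddings with a dot product, then normalised by a softmax over the query
# tokens. The tensors below are random stand-ins.
import torch
_hidden = 768
_q = torch.randn(20 , _hidden )       # query token embeddings
_s_start = torch.randn(5 , _hidden )  # support start-token embeddings
_p_start = torch.matmul(_q , _s_start.T ).sum(1 ).softmax(0 )  # one prob per query token
assert torch.isclose(_p_start.sum() , torch.tensor(1.0 ) )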
| 666 | 1 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__lowerCAmelCase = False
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = 'ybelkada/fonts'
def _UpperCAmelCase ( ):
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def _UpperCAmelCase ( __A : Any , __A : Union[str, Any] , __A : int ):
requires_backends(__A , ['''torch'''] )
_check_torch_version()
a_ : Union[str, Any] = image_tensor.unsqueeze(0 )
a_ : List[str] = torch.nn.functional.unfold(__A , (patch_height, patch_width) , stride=(patch_height, patch_width) )
a_ : List[str] = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , __A , __A , -1 )
a_ : Any = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def _UpperCAmelCase ( __A : str , __A : int = 36 , __A : str = "black" , __A : str = "white" , __A : int = 5 , __A : int = 5 , __A : int = 5 , __A : int = 5 , __A : Optional[bytes] = None , __A : Optional[str] = None , ):
requires_backends(__A , '''vision''' )
# Add new lines so that each line is no more than 80 characters.
a_ : List[Any] = textwrap.TextWrapper(width=80 )
a_ : int = wrapper.wrap(text=__A )
a_ : Optional[Any] = '''\n'''.join(__A )
if font_bytes is not None and font_path is None:
a_ : List[Any] = io.BytesIO(__A )
elif font_path is not None:
a_ : Any = font_path
else:
a_ : int = hf_hub_download(__A , '''Arial.TTF''' )
a_ : Optional[Any] = ImageFont.truetype(__A , encoding='''UTF-8''' , size=__A )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
a_ : Tuple = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , __A ) )
a_ , a_ , a_ , a_ : Optional[Any] = temp_draw.textbbox((0, 0) , __A , __A )
# Create the actual image with a bit of padding around the text.
a_ : Tuple = text_width + left_padding + right_padding
a_ : int = text_height + top_padding + bottom_padding
a_ : Union[str, Any] = Image.new('''RGB''' , (image_width, image_height) , __A )
a_ : Dict = ImageDraw.Draw(__A )
draw.text(xy=(left_padding, top_padding) , text=__A , fill=__A , font=__A )
return image
def _UpperCAmelCase ( __A : np.ndarray , __A : str , **__A : int ):
requires_backends(__A , '''vision''' )
# Convert to PIL image if necessary
a_ : Dict = to_pil_image(__A )
a_ : int = render_text(__A , **__A )
a_ : List[Any] = max(header_image.width , image.width )
a_ : List[Any] = int(image.height * (new_width / image.width) )
a_ : Optional[int] = int(header_image.height * (new_width / header_image.width) )
a_ : Union[str, Any] = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
a_ : Optional[int] = to_numpy_array(__A )
if infer_channel_dimension_format(__A ) == ChannelDimension.LAST:
a_ : Optional[Any] = to_channel_dimension_format(__A , ChannelDimension.LAST )
return new_image
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = ["flattened_patches"]
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : int = 2048 , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> None:
super().__init__(**__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
a_ : int = do_normalize
a_ : Any = do_convert_rgb
a_ : List[Any] = max_patches
a_ : Any = is_vqa
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : dict , **__SCREAMING_SNAKE_CASE : List[Any] ) -> np.ndarray:
requires_backends(self.extract_flattened_patches , '''torch''' )
_check_torch_version()
# convert to torch
a_ : str = to_channel_dimension_format(__SCREAMING_SNAKE_CASE , ChannelDimension.FIRST )
a_ : Any = torch.from_numpy(__SCREAMING_SNAKE_CASE )
a_ , a_ : int = patch_size['''height'''], patch_size['''width''']
a_ , a_ : Dict = get_image_size(__SCREAMING_SNAKE_CASE )
        # maximize scale s.t. the resized image yields at most `max_patches` patches
a_ : Optional[Any] = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
a_ : Optional[int] = max(min(math.floor(scale * image_height / patch_height ) , __SCREAMING_SNAKE_CASE ) , 1 )
a_ : List[Any] = max(min(math.floor(scale * image_width / patch_width ) , __SCREAMING_SNAKE_CASE ) , 1 )
a_ : List[Any] = max(num_feasible_rows * patch_height , 1 )
a_ : Dict = max(num_feasible_cols * patch_width , 1 )
a_ : int = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE , antialias=__SCREAMING_SNAKE_CASE , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
a_ : Optional[int] = torch_extract_patches(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : int = patches.shape
a_ : List[Any] = patches_shape[1]
a_ : str = patches_shape[2]
a_ : Any = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
a_ : int = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
a_ : List[Any] = torch.arange(__SCREAMING_SNAKE_CASE ).reshape([rows, 1] ).repeat(1 , __SCREAMING_SNAKE_CASE ).reshape([rows * columns, 1] )
a_ : List[Any] = torch.arange(__SCREAMING_SNAKE_CASE ).reshape([1, columns] ).repeat(__SCREAMING_SNAKE_CASE , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
a_ : Union[str, Any] = row_ids.to(torch.floataa )
a_ : Optional[int] = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
a_ : Dict = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
a_ : Optional[Any] = torch.nn.functional.pad(__SCREAMING_SNAKE_CASE , [0, 0, 0, max_patches - (rows * columns)] ).float()
a_ : Tuple = to_numpy_array(__SCREAMING_SNAKE_CASE )
return result
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> np.ndarray:
if image.dtype == np.uinta:
a_ : Dict = image.astype(np.floataa )
# take mean across the whole `image`
a_ : Any = np.mean(__SCREAMING_SNAKE_CASE )
a_ : Any = np.std(__SCREAMING_SNAKE_CASE )
a_ : Dict = max(__SCREAMING_SNAKE_CASE , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : ImageInput , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, int]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : ChannelDimension = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE : List[str] , ) -> ImageInput:
a_ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
a_ : Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
a_ : Dict = patch_size if patch_size is not None else self.patch_size
a_ : Union[str, Any] = max_patches if max_patches is not None else self.max_patches
a_ : Union[str, Any] = self.is_vqa
if kwargs.get('''data_format''' , __SCREAMING_SNAKE_CASE ) is not None:
            raise ValueError('''data_format is not an accepted input as the outputs are flattened patches.''' )
a_ : int = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
a_ : Optional[Any] = [convert_to_rgb(__SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
a_ : str = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
a_ : int = kwargs.pop('''font_bytes''' , __SCREAMING_SNAKE_CASE )
a_ : Tuple = kwargs.pop('''font_path''' , __SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a_ : str = [header_text] * len(__SCREAMING_SNAKE_CASE )
a_ : int = [
render_header(__SCREAMING_SNAKE_CASE , header_text[i] , font_bytes=__SCREAMING_SNAKE_CASE , font_path=__SCREAMING_SNAKE_CASE )
for i, image in enumerate(__SCREAMING_SNAKE_CASE )
]
if do_normalize:
a_ : int = [self.normalize(image=__SCREAMING_SNAKE_CASE ) for image in images]
# convert to torch tensor and permute
a_ : Tuple = [
self.extract_flattened_patches(image=__SCREAMING_SNAKE_CASE , max_patches=__SCREAMING_SNAKE_CASE , patch_size=__SCREAMING_SNAKE_CASE )
for image in images
]
# create attention mask in numpy
a_ : int = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
a_ : Dict = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} , tensor_type=__SCREAMING_SNAKE_CASE )
return encoded_outputs
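# --- Added sketch (hedged): patch extraction with `unfold` ---
# `torch_extract_patches` above relies on `torch.nn.functional.unfold` to cut
# an image into non-overlapping patches (stride equal to the kernel size).
# The shapes below are hypothetical.
import torch
_image = torch.randn(1 , 3 , 32 , 32 )  # (batch, channels, height, width)
_patches = torch.nn.functional.unfold(_image , (16, 16) , stride=(16, 16) )
# four 16x16 patches, each flattened together with its channels
assert _patches.shape == (1, 3 * 16 * 16, 4)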
| 666 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
snake_case__ = Features({"image": Image()} )
snake_case__ = Features({"labels": ClassLabel} )
snake_case__ = "image"
snake_case__ = "labels"
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
a_ : Optional[int] = copy.deepcopy(self )
a_ : int = self.label_schema.copy()
a_ : Tuple = features[self.label_column]
a_ : str = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
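# --- Added usage sketch (hedged) ---
# The alignment method above (named `align_with_features` in the upstream
# datasets library; the name here is an assumption) copies the dataset's
# concrete `ClassLabel`, with its label names, into the template's label
# schema. A hypothetical feature set it would accept:
from datasets import ClassLabel, Features, Image
_features = Features({'''image''': Image(), '''labels''': ClassLabel(names=['''cat''', '''dog'''])} )
# Passing `_features` to that method returns a copy of the template whose
# `label_schema` now carries the two concrete class names.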
| 666 | 1 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class SCREAMING_SNAKE_CASE :
def __init__( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=None ) -> List[str]:
# Input as list
a_ : Tuple = list(poly_a or [0] )[:]
a_ : int = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
a_ : str = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
a_ : Tuple = len(self.polyB )
# Add 0 to make lengths equal a power of 2
a_ : Tuple = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
a_ : Optional[Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
a_ : int = self.__multiply()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
a_ : Tuple = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(__SCREAMING_SNAKE_CASE ) <= 1:
return dft[0]
#
a_ : Union[str, Any] = self.c_max_length // 2
while next_ncol > 0:
a_ : List[str] = [[] for i in range(__SCREAMING_SNAKE_CASE )]
a_ : Optional[Any] = self.root**next_ncol
# First half of next step
a_ : Optional[int] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__SCREAMING_SNAKE_CASE ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
a_ : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__SCREAMING_SNAKE_CASE ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
a_ : Any = new_dft
a_ : str = next_ncol // 2
return dft[0]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
a_ : List[Any] = self.__dft('''A''' )
a_ : Dict = self.__dft('''B''' )
a_ : Union[str, Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
a_ : List[Any] = 2
while next_ncol <= self.c_max_length:
a_ : int = [[] for i in range(__SCREAMING_SNAKE_CASE )]
a_ : Tuple = self.root ** (next_ncol // 2)
a_ : Dict = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
a_ : Optional[Any] = new_inverse_c
next_ncol *= 2
# Unpack
a_ : Optional[int] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Optional[int] ) -> List[Any]:
a_ : Any = '''A = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A] ) )
a_ : Any = '''B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B] ) )
a_ : Optional[Any] = '''A*B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.product ) )
return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
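# --- Added cross-check (hedged): the same product via numpy's FFT ---
# For reference, the convolution the class above computes should match a
# plain numpy FFT product: (1 + 2x + 3x^2)(1 + x) = 1 + 3x + 5x^2 + 3x^3,
# i.e. ascending coefficients [1, 3, 5, 3].
import numpy as np
_a, _b = [1, 2, 3], [1, 1]
_n = 1 << (len(_a) + len(_b) - 2).bit_length()  # power of two >= product length
_coeffs = np.fft.ifft(np.fft.fft(_a , _n ) * np.fft.fft(_b , _n ) ).real.round(8 )
print(_coeffs[: len(_a) + len(_b) - 1] )  # [1. 3. 5. 3.]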
| 666 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
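# --- Added usage sketch (hedged) ---
# n = 1 succeeds trivially; boards of size 2-4 raise ValueError, and n = 5 is
# the smallest non-trivial board with an open knight's tour.
assert open_knight_tour(1) == [[1]]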
| 666 | 1 |
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class SCREAMING_SNAKE_CASE :
def __init__( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=99 , __SCREAMING_SNAKE_CASE : List[Any]=64 , __SCREAMING_SNAKE_CASE : Any=32 , __SCREAMING_SNAKE_CASE : List[Any]=5 , __SCREAMING_SNAKE_CASE : Optional[int]=4 , __SCREAMING_SNAKE_CASE : Tuple=37 , __SCREAMING_SNAKE_CASE : List[str]="gelu" , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : List[str]=512 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : Optional[int]=None , ) -> Dict:
a_ : int = parent
a_ : List[Any] = batch_size
a_ : Tuple = seq_length
a_ : int = is_training
a_ : Dict = use_input_mask
a_ : Optional[int] = use_token_type_ids
a_ : Optional[int] = use_labels
a_ : Dict = vocab_size
a_ : Union[str, Any] = hidden_size
a_ : Optional[Any] = embedding_size
a_ : int = num_hidden_layers
a_ : str = num_attention_heads
a_ : Optional[int] = intermediate_size
a_ : List[Any] = hidden_act
a_ : str = hidden_dropout_prob
a_ : List[str] = attention_probs_dropout_prob
a_ : Optional[Any] = max_position_embeddings
a_ : List[str] = type_vocab_size
a_ : Optional[int] = type_sequence_label_size
a_ : Dict = initializer_range
a_ : str = num_labels
a_ : int = num_choices
a_ : Tuple = scope
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
a_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : List[Any] = None
if self.use_input_mask:
a_ : int = random_attention_mask([self.batch_size, self.seq_length] )
a_ : str = None
if self.use_token_type_ids:
a_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ : Optional[Any] = None
a_ : Optional[Any] = None
a_ : Union[str, Any] = None
if self.use_labels:
a_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ : int = ids_tensor([self.batch_size] , self.num_choices )
a_ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] ) -> Tuple:
a_ : List[str] = MobileBertModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : List[str] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : Tuple = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]:
a_ : Any = MobileBertForMaskedLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : Dict = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple ) -> Any:
a_ : int = MobileBertForNextSentencePrediction(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : Union[str, Any] = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
a_ : Dict = MobileBertForPreTraining(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : Union[str, Any] = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , next_sentence_label=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Tuple:
a_ : List[str] = MobileBertForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : Any = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> Any:
a_ : str = self.num_labels
a_ : Tuple = MobileBertForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : Optional[Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str ) -> List[Any]:
a_ : Optional[int] = self.num_labels
a_ : Tuple = MobileBertForTokenClassification(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : str = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple ) -> Dict:
a_ : Dict = self.num_choices
a_ : List[str] = MobileBertForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a_ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ : List[str] = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
a_ : Tuple = self.prepare_config_and_inputs()
        a_ , a_ , a_ , a_ , a_ , a_ , a_ = config_and_inputs
a_ : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ = True
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict=False ) -> List[str]:
a_ : int = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
a_ : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
a_ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
a_ : Dict = MobileBertModelTester(self )
a_ : Union[str, Any] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
a_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
a_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
a_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
a_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( __A : Dict ):
    return torch.tensor(
        __A , dtype=torch.long , device=torch_device , )
__lowerCAmelCase = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
a_ : Union[str, Any] = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(__SCREAMING_SNAKE_CASE )
a_ : Dict = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
a_ : List[str] = model(__SCREAMING_SNAKE_CASE )[0]
a_ : List[Any] = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
a_ : List[str] = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
] , device=__SCREAMING_SNAKE_CASE , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% relative difference on a value of 10e8
        # yields an absolute difference of ~1, so comparing absolute differences is unreliable here.
        # Instead, we divide the expected result by the actual result, which should be ~1, and check that the
        # ratio is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
a_ : Optional[Any] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
a_ : str = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
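# A minimal, self-contained sketch of the ratio-based tolerance check used in the test
# above. Because the outputs span many orders of magnitude, an absolute-difference
# comparison is dominated by the largest entries; bounding the elementwise ratio
# expected / actual within [1 - tolerance, 1 + tolerance] is scale-invariant. The tensor
# values in the usage comments are illustrative, not real model outputs.
def _ratio_within_tolerance(expected, actual, tolerance=1e-3):
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance))
# e.g. _ratio_within_tolerance(torch.tensor([1e8, 2.0]), torch.tensor([1.00001e8, 2.0])) -> True
#      _ratio_within_tolerance(torch.tensor([1e8, 2.0]), torch.tensor([1.01e8, 2.0])) -> False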
| 666 |
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 666 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "mobilenet_v2"
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Any=224 , __SCREAMING_SNAKE_CASE : Optional[Any]=1.0 , __SCREAMING_SNAKE_CASE : Optional[Any]=8 , __SCREAMING_SNAKE_CASE : str=8 , __SCREAMING_SNAKE_CASE : Optional[Any]=6 , __SCREAMING_SNAKE_CASE : str=32 , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Any="relu6" , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=0.8 , __SCREAMING_SNAKE_CASE : str=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=0.001 , __SCREAMING_SNAKE_CASE : str=255 , **__SCREAMING_SNAKE_CASE : Tuple , ) -> int:
super().__init__(**__SCREAMING_SNAKE_CASE )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
a_ : List[str] = num_channels
a_ : Optional[Any] = image_size
a_ : List[str] = depth_multiplier
a_ : Any = depth_divisible_by
a_ : Optional[int] = min_depth
a_ : List[str] = expand_ratio
a_ : Union[str, Any] = output_stride
a_ : Any = first_layer_is_expansion
a_ : Union[str, Any] = finegrained_output
a_ : Tuple = hidden_act
a_ : Optional[Any] = tf_padding
a_ : Optional[int] = classifier_dropout_prob
a_ : Optional[Any] = initializer_range
a_ : Union[str, Any] = layer_norm_eps
a_ : Any = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = version.parse("1.11" )
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float:
return 1e-4
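# A hedged sketch of what `depth_multiplier`, `depth_divisible_by` and `min_depth` above
# typically control in MobileNet-style models: scaled channel counts are rounded to a
# multiple of the divisor without dropping more than ~10% below the scaled value. The
# helper name and standalone form are illustrative; this is not part of the config class.
def make_divisible(value: float, divisor: int = 8, min_value: int = 8) -> int:
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # never round down by more than 10%
        new_value += divisor
    return new_value
# e.g. make_divisible(32 * 1.4) == 48 and make_divisible(32 * 0.35) == 16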
| 666 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def _UpperCAmelCase ( __A : str , __A : dict ):
a_ : Tuple = BeautifulSoup(requests.get(__A , params=__A ).content , '''html.parser''' )
a_ : List[str] = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
a_ : List[str] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
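# A self-contained illustration of the parsing step above, run on inline HTML instead of a
# live Google Scholar response. The markup mimics the structure the scraper expects
# (div classes gs_ri / gs_fl, citation count in the third anchor); it is illustrative,
# not real page source.
def _demo_parse_citation():
    html = '<div class="gs_ri"><div class="gs_fl"><a>Save</a><a>Cite</a><a>Cited by 42</a></div></div>'
    div = BeautifulSoup(html, '''html.parser''').find('''div''', attrs={'''class''': '''gs_ri'''})
    anchors = div.find('''div''', attrs={'''class''': '''gs_fl'''}).find_all('''a''')
    assert anchors[2].get_text() == "Cited by 42"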
if __name__ == "__main__":
__lowerCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 666 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
torch.manual_seed(0 )
a_ : Tuple = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
torch.manual_seed(0 )
a_ : Any = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
a_ : List[Any] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
a_ : List[Any] = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a_ : Union[str, Any] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
a_ : Any = DDPMScheduler()
a_ : str = AudioDiffusionPipeline(vqvae=__SCREAMING_SNAKE_CASE , unet=self.dummy_unet , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 )
a_ : List[Any] = output.audios[0]
a_ : Dict = output.images[0]
a_ : Dict = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : Optional[Any] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : str = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
a_ : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
a_ : int = DDIMScheduler()
a_ : Dict = self.dummy_vqvae_and_unet
a_ : List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : Any = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : List[str] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
a_ : int = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : int = pipe(raw_audio=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , start_step=5 , steps=10 )
a_ : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
a_ : Optional[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
a_ : List[str] = self.dummy_unet_condition
a_ : Dict = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__SCREAMING_SNAKE_CASE , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : int = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : Any = torch.rand((1, 1, 10) )
a_ : Tuple = pipe(generator=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.images[0]
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
a_ : Any = torch_device
a_ : Optional[int] = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
a_ : Dict = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.audios[0]
a_ : Tuple = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
a_ : str = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : Tuple = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 666 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__lowerCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def _UpperCAmelCase ( __A : str , __A : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
a_ : Tuple = XLMProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Optional[Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
else:
a_ : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Any = ProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
a_ : str = ['''key_proj''', '''value_proj''', '''query_proj''']
a_ : Tuple = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
a_ : List[str] = key.split('''.''' )
if attributes[0] == "lm_head":
a_ : List[str] = prophet
a_ : Dict = prophet_old
else:
a_ : str = prophet.prophetnet
a_ : int = prophet_old.model
a_ : str = False
for attribute in attributes:
if attribute in mapping:
a_ : Dict = mapping[attribute]
if not hasattr(__A , __A ) and len(__A ) > 0:
a_ : List[str] = attribute
elif hasattr(__A , __A ):
a_ : Union[str, Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
a_ : Tuple = old_model.weight
logger.info(f'{attribute} is initialized.' )
a_ : Union[str, Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
a_ : Union[str, Any] = old_model.bias
logger.info(f'{attribute} is initialized' )
a_ : Dict = True
break
elif attribute in special_keys and hasattr(__A , '''in_proj_weight''' ):
a_ : Tuple = old_model.in_proj_weight.shape[0] // 3
a_ : Any = getattr(__A , __A )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
a_ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
a_ : Optional[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
a_ : List[Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
a_ : Optional[int] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
a_ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
a_ : Any = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
a_ : Dict = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
a_ : Union[str, Any] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
a_ : Optional[Any] = True
break
if attribute.isdigit():
a_ : Union[str, Any] = model[int(__A )]
a_ : str = old_model[int(__A )]
else:
a_ : Tuple = getattr(__A , __A )
if old_attribute == "":
a_ : List[str] = old_model
else:
if not hasattr(__A , __A ):
raise ValueError(f'{old_model} does not have {old_attribute}' )
a_ : Optional[Any] = getattr(__A , __A )
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!' )
print(f'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(__A )
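# A minimal sketch of the fused-projection split performed in the loop above: fairseq
# stores the query/key/value projections stacked in one `in_proj_weight` of shape
# (3 * embed_dim, embed_dim), which the converter slices into thirds in q, k, v order.
# The shapes mentioned below are illustrative.
def _split_in_proj(in_proj_weight):
    embed_dim = in_proj_weight.shape[0] // 3
    query = in_proj_weight[:embed_dim, :]
    key = in_proj_weight[embed_dim : 2 * embed_dim, :]
    value = in_proj_weight[2 * embed_dim :, :]
    return query, key, value
# e.g. splitting a (1536, 512) fused weight yields three (512, 512) chunks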
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 666 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Dict ) -> None:
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 666 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__lowerCAmelCase = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
__lowerCAmelCase = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Dict=False , ) -> str:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
a_ : Optional[Any] = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in predictions] )
a_ : int = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in references] )
else:
a_ : List[str] = np.asarray(__SCREAMING_SNAKE_CASE )
a_ : Any = np.asarray(__SCREAMING_SNAKE_CASE )
if ignore_case:
a_ : List[str] = np.char.lower(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.char.lower(__SCREAMING_SNAKE_CASE )
if ignore_punctuation:
a_ : Any = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
a_ : Union[str, Any] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : int = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
if ignore_numbers:
a_ : int = string.digits.maketrans('''''' , '''''' , string.digits )
a_ : Optional[int] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : Dict = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = predictions == references
return {"exact_match": np.mean(__SCREAMING_SNAKE_CASE ) * 100}
| 666 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__lowerCAmelCase = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _UpperCAmelCase ( __A : Optional[int] , __A : str , __A : Dict , __A : List[Any] , __A : List[Any] ):
for attribute in key.split('''.''' ):
a_ : int = getattr(__A , __A )
if weight_type is not None:
a_ : Tuple = getattr(__A , __A ).shape
else:
a_ : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
a_ : Optional[int] = value
elif weight_type == "weight_g":
a_ : List[Any] = value
elif weight_type == "weight_v":
a_ : Any = value
elif weight_type == "bias":
a_ : List[Any] = value
elif weight_type == "running_mean":
a_ : str = value
elif weight_type == "running_var":
a_ : List[str] = value
elif weight_type == "num_batches_tracked":
a_ : Optional[Any] = value
elif weight_type == "inv_freq":
a_ : Union[str, Any] = value
else:
a_ : Tuple = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _UpperCAmelCase ( __A : str , __A : Union[str, Any] , __A : int ):
a_ : List[str] = []
a_ : List[str] = fairseq_model.state_dict()
a_ : Union[str, Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
a_ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == '''group''' , )
a_ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
a_ : Union[str, Any] = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
a_ : Tuple = True
if "*" in mapped_key:
a_ : Any = name.split(__A )[0].split('''.''' )[-2]
a_ : Tuple = mapped_key.replace('''*''' , __A )
if "pos_bias_u" in name:
a_ : List[Any] = None
elif "pos_bias_v" in name:
a_ : List[Any] = None
elif "weight_g" in name:
a_ : str = '''weight_g'''
elif "weight_v" in name:
a_ : List[Any] = '''weight_v'''
elif "bias" in name:
a_ : List[str] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a_ : List[Any] = '''weight'''
elif "running_mean" in name:
a_ : Any = '''running_mean'''
elif "inv_freq" in name:
a_ : List[str] = '''inv_freq'''
elif "running_var" in name:
a_ : Optional[int] = '''running_var'''
elif "num_batches_tracked" in name:
a_ : Optional[Any] = '''num_batches_tracked'''
else:
a_ : Dict = None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(f'Unused weights: {unused_weights}' )
def _UpperCAmelCase ( __A : str , __A : int , __A : int , __A : List[str] , __A : int ):
a_ : Optional[int] = full_name.split('''conv_layers.''' )[-1]
a_ : Optional[Any] = name.split('''.''' )
a_ : List[str] = int(items[0] )
a_ : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
a_ : List[Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
a_ : int = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
a_ : List[str] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
a_ : Any = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__A )
@torch.no_grad()
def _UpperCAmelCase ( __A : str , __A : int , __A : Dict=None , __A : Tuple=None , __A : int=True ):
if config_path is not None:
a_ : Dict = WavaVecaConformerConfig.from_pretrained(__A , hidden_act='''swish''' )
else:
a_ : List[Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
a_ : Union[str, Any] = '''rotary'''
if is_finetuned:
if dict_path:
a_ : Tuple = Dictionary.load(__A )
            # important: change the bos & pad token ids, since the CTC blank symbol is <pad>
            # and not <s> as in fairseq
a_ : Dict = target_dict.pad_index
a_ : Union[str, Any] = target_dict.bos_index
a_ : Dict = target_dict.eos_index
a_ : Union[str, Any] = len(target_dict.symbols )
a_ : str = os.path.join(__A , '''vocab.json''' )
if not os.path.isdir(__A ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__A ) )
return
os.makedirs(__A , exist_ok=__A )
a_ : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
a_ : Union[str, Any] = 0
a_ : Dict = 1
with open(__A , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__A , __A )
a_ : Any = WavaVecaCTCTokenizer(
__A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__A , )
a_ : Optional[Any] = True if config.feat_extract_norm == '''layer''' else False
a_ : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , )
a_ : Dict = WavaVecaProcessor(feature_extractor=__A , tokenizer=__A )
processor.save_pretrained(__A )
a_ : Dict = WavaVecaConformerForCTC(__A )
else:
a_ : Any = WavaVecaConformerForPreTraining(__A )
if is_finetuned:
a_ , a_ , a_ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
a_ : str = argparse.Namespace(task='''audio_pretraining''' )
a_ : str = fairseq.tasks.setup_task(__A )
a_ , a_ , a_ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__A )
a_ : Optional[int] = model[0].eval()
recursively_load_weights(__A , __A , not is_finetuned )
hf_wavavec.save_pretrained(__A )
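# A self-contained sketch of the wildcard key mapping used in `recursively_load_weights`
# above: the layer index is parsed out of the fairseq parameter name and substituted for
# "*" in the HF-side key. The parsing here is simplified relative to the split logic
# above, and the example keys are illustrative, not real checkpoint entries.
def _resolve_wildcard_key(fairseq_name, mapped_key):
    if "*" in mapped_key:
        layer_index = fairseq_name.split('''.''')[1]  # e.g. "layers.7.fc1.weight" -> "7"
        mapped_key = mapped_key.replace('''*''', layer_index)
    return mapped_key
# _resolve_wildcard_key("layers.7.linear1.weight", "encoder.layers.*.fc1.weight")
# -> "encoder.layers.7.fc1.weight"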
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__lowerCAmelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 666 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
snake_case__ = RobertaTokenizer
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : str="replace" , __SCREAMING_SNAKE_CASE : Optional[Any]="<s>" , __SCREAMING_SNAKE_CASE : Optional[int]="</s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : List[Any]="<s>" , __SCREAMING_SNAKE_CASE : int="<unk>" , __SCREAMING_SNAKE_CASE : Optional[int]="<pad>" , __SCREAMING_SNAKE_CASE : Optional[int]="<mask>" , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : int=True , **__SCREAMING_SNAKE_CASE : str , ) -> Optional[Any]:
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
a_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
a_ : List[Any] = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
a_ : str = add_prefix_space
a_ : Dict = pre_tok_class(**__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = add_prefix_space
a_ : str = '''post_processor'''
a_ : Any = getattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
a_ : Dict = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
a_ : List[str] = tuple(state['''sep'''] )
if "cls" in state:
a_ : List[str] = tuple(state['''cls'''] )
a_ : Any = False
if state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
a_ : Union[str, Any] = add_prefix_space
a_ : Optional[int] = True
if state.get('''trim_offsets''' , __SCREAMING_SNAKE_CASE ) != trim_offsets:
a_ : Optional[int] = trim_offsets
a_ : List[Any] = True
if changes_to_apply:
a_ : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE , state.pop('''type''' ) )
a_ : str = component_class(**__SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> Tuple:
a_ : Tuple = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else value
a_ : List[str] = value
def SCREAMING_SNAKE_CASE ( self : Optional[int] , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : List[str] ) -> BatchEncoding:
a_ : List[str] = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : str ) -> BatchEncoding:
a_ : str = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
a_ : str = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str=None ) -> List[Any]:
a_ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
a_ : int = [self.sep_token_id]
a_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
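# A hedged sketch of the backend-state surgery performed in __init__ above, using the
# `tokenizers` library directly: serialize the pre-tokenizer's state, flip
# `add_prefix_space`, and rebuild the object from its own class. Assumes a ByteLevel
# pre-tokenizer, as used by RoBERTa's byte-level BPE; the helper name is illustrative.
def _flip_add_prefix_space(tokenizer, add_prefix_space=True):
    state = json.loads(tokenizer.backend_tokenizer.pre_tokenizer.__getstate__())
    if state.get('''add_prefix_space''') != add_prefix_space:
        state['''add_prefix_space'''] = add_prefix_space
        pre_tok_class = getattr(pre_tokenizers, state.pop('''type'''))  # e.g. ByteLevel
        tokenizer.backend_tokenizer.pre_tokenizer = pre_tok_class(**state)
    return tokenizer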
| 666 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def _UpperCAmelCase ( __A : Union[str, Any] ):
a_ : Tuple = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
a_ : List[Any] = DetaConfig(
backbone_config=__A , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=__A , with_box_refine=__A , two_stage=__A , )
# set labels
a_ : Optional[Any] = '''huggingface/label-files'''
if "o365" in model_name:
a_ : Optional[Any] = 3_66
a_ : Tuple = '''object365-id2label.json'''
else:
a_ : Any = 91
a_ : Union[str, Any] = '''coco-detection-id2label.json'''
a_ : Tuple = num_labels
a_ : str = json.load(open(cached_download(hf_hub_url(__A , __A , repo_type='''dataset''' ) ) , '''r''' ) )
a_ : Optional[int] = {int(__A ): v for k, v in idalabel.items()}
a_ : int = idalabel
a_ : Dict = {v: k for k, v in idalabel.items()}
return config
def _UpperCAmelCase ( __A : List[str] ):
a_ : Tuple = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v( state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
            a_ : Optional[Any] = in_proj_weight[:dim, :]
            a_ : List[Any] = in_proj_bias[:dim]
            a_ : Optional[Any] = in_proj_weight[dim : dim * 2, :]
            a_ : Union[str, Any] = in_proj_bias[dim : dim * 2]
            a_ : Optional[int] = in_proj_weight[-dim:, :]
            a_ : int = in_proj_bias[-dim:]
# fmt: on
def read_in_decoder_q_k_v( state_dict , config ):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
a_ : Dict = in_proj_weight[:hidden_size, :]
a_ : Tuple = in_proj_bias[:hidden_size]
        a_ : Any = in_proj_weight[hidden_size : hidden_size * 2, :]
a_ : Tuple = in_proj_bias[hidden_size : hidden_size * 2]
a_ : Optional[int] = in_proj_weight[-hidden_size:, :]
a_ : int = in_proj_bias[-hidden_size:]
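# Illustrative sketch (added; not part of the original script) of the q/k/v
# split that the two read_in_*_q_k_v helpers above perform: a fused
# in-projection weight of shape (3*h, h) and bias of shape (3*h,) are sliced
# into query/key/value chunks of shape (h, h) and (h,), in that order.
#
#     h = 4
#     w, b = torch.zeros(3 * h, h), torch.zeros(3 * h)
#     q_w, k_w, v_w = w[:h, :], w[h : 2 * h, :], w[-h:, :]
#     q_b, k_b, v_b = b[:h], b[h : 2 * h], b[-h:]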
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deta_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    config = get_deta_config(model_name )
# load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
    else:
        raise ValueError(f'Model name {model_name} not supported' )
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    # inspect the original state dict: print the name and shape of every parameter
    for name, param in state_dict.items():
        print(name , param.shape )
# rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
a_ : Optional[Any] = state_dict.pop(__A )
a_ : int = val
if "input_proj" in key:
a_ : str = state_dict.pop(__A )
a_ : Optional[Any] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
a_ : List[str] = state_dict.pop(__A )
a_ : List[Any] = val
# finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config )
    model.load_state_dict(state_dict )
model.eval()
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    model.to(device )
# load image processor
    processor = DetaImageProcessor(format='''coco_detection''' )
# verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values.to(device ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(device ) , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(device ) , atol=1E-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
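# Example invocation (a sketch; the script filename and output folder are
# assumptions, not taken from the original repo):
#
#     python convert_deta_swin_to_pytorch.py \
#         --model_name deta-swin-large \
#         --pytorch_dump_folder_path ./deta-swin-large-converted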
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__lowerCAmelCase = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
__lowerCAmelCase = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
__lowerCAmelCase = '▁'
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict="<s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="<s>" , __SCREAMING_SNAKE_CASE : Dict="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Tuple="<mask>" , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
a_ : Tuple = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
a_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
a_ : Tuple = vocab_file
a_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
a_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
a_ : Any = len(self.sp_model ) - 1
a_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ : List[str] = [self.cls_token_id]
a_ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
a_ : List[str] = [self.sep_token_id]
a_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : int = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a_ : Optional[int] = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
a_ : Dict = []
a_ : List[Any] = ''''''
a_ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
a_ : Dict = True
a_ : Optional[Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
a_ : Tuple = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def __getstate__( self : Dict ) -> int:
a_ : Dict = self.__dict__.copy()
a_ : List[str] = None
return state
def __setstate__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
a_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a_ : Union[str, Any] = {}
a_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
a_ : Union[str, Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
a_ : Any = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
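# Hedged usage sketch (upstream this tokenizer class is published as
# BarthezTokenizer; the checkpoint name comes from the pretrained map above):
#
#     tokenizer = BarthezTokenizer.from_pretrained('''moussaKam/barthez''')
#     ids = tokenizer('''Le chat dort.''')['''input_ids''']  # wrapped in <s> ... </s>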
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = DDIMPipeline
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
torch.manual_seed(0 )
a_ : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
a_ : str = DDIMScheduler()
a_ : Union[str, Any] = {'''unet''': unet, '''scheduler''': scheduler}
return components
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple=0 ) -> str:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Dict = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : Union[str, Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu'''
a_ : List[Any] = self.get_dummy_components()
a_ : List[str] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = pipe(**__SCREAMING_SNAKE_CASE ).images
a_ : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
a_ : int = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
a_ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
a_ : Optional[Any] = '''google/ddpm-cifar10-32'''
a_ : Optional[Any] = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Dict = DDIMScheduler()
a_ : List[str] = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddim.to(__SCREAMING_SNAKE_CASE )
ddim.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : Tuple = ddim(generator=__SCREAMING_SNAKE_CASE , eta=0.0 , output_type='''numpy''' ).images
a_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ : List[str] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : int = '''google/ddpm-ema-bedroom-256'''
a_ : str = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Tuple = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddpm.to(__SCREAMING_SNAKE_CASE )
ddpm.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : List[Any] = ddpm(generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
a_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
a_ : Optional[Any] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
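# Hedged usage sketch of the pipeline exercised above (the checkpoint name and
# the eta/output_type arguments are taken from the slow test):
#
#     pipe = DDIMPipeline.from_pretrained('''google/ddpm-cifar10-32''')
#     image = pipe(eta=0.0, output_type='''numpy''').images[0]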
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube( side_length : float ):
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def surface_area_cuboid( length : float , breadth : float , height : float ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere( radius : float ):
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def surface_area_hemisphere( radius : float ):
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def surface_area_cone( radius : float , height : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum( radius_1 : float , radius_2 : float , height : float ):
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            '''surface_area_conical_frustum() only accepts non-negative values''' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder( radius : float , height : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def surface_area_torus( torus_radius : float , tube_radius : float ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def area_rectangle( length : float , width : float ):
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def area_square( side_length : float ):
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def area_triangle( base : float , height : float ):
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def area_triangle_three_sides( side1 : float , side2 : float , side3 : float ):
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('''Given three sides do not form a triangle''' )
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3) )
    return area
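# Added sanity check of Heron's formula above: a 3-4-5 right triangle has
# semi-perimeter (3 + 4 + 5) / 2 = 6, so the area is sqrt(6 * 3 * 2 * 1) = 6.0,
# matching the right-triangle formula (3 * 4) / 2.
assert area_triangle_three_sides(3, 4, 5) == 6.0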
def area_parallelogram( base : float , height : float ):
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def area_trapezium( base1 : float , base2 : float , height : float ):
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError('''area_trapezium() only accepts non-negative values''' )
    return 1 / 2 * (base1 + base2) * height
def area_circle( radius : float ):
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def area_ellipse( radius_x : float , radius_y : float ):
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def area_rhombus( diagonal_1 : float , diagonal_2 : float ):
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('''area_rhombus() only accepts non-negative values''' )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon( sides : int , length : float ):
    if not isinstance(sides , int ) or sides < 3:
        raise ValueError(
            '''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
    elif length < 0:
        raise ValueError(
            '''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
    return (sides * length**2) / (4 * tan(pi / sides ))
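# Added sanity check: for a square (sides=4, length=10) the regular-polygon
# formula reduces to the plain square area, 100, up to floating-point rounding
# in tan(pi / 4).
assert abs(area_reg_polygon(4, 10) - area_square(10)) < 1e-9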
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = 42
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="Translation" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def __call__( self : Dict ) -> Tuple:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = None
snake_case__ = None
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="TranslationVariableLanguages" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : List[str] = sorted(set(self.languages ) ) if self.languages else None
a_ : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self : Any ) -> Optional[Any]:
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , translation_dict : Optional[Any] ) -> Optional[Any]:
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({", ".join(lang_set )}).' )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
return {"language": languages, "translation": translations}
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
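# Illustrative example (added) of the flattening performed above: a dict with
# one or more translations per language becomes parallel, language-sorted lists.
#
#     {"en": "the cat", "fr": ["le chat", "la chatte"]}
#     -> {"language": ("en", "fr", "fr"),
#         "translation": ("the cat", "la chatte", "le chat")}
#
# (the (lang, text) tuples are sorted, so "la chatte" precedes "le chat")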
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "visual_bert"
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : str=3_0522 , __SCREAMING_SNAKE_CASE : Union[str, Any]=768 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Union[str, Any]=12 , __SCREAMING_SNAKE_CASE : Dict=12 , __SCREAMING_SNAKE_CASE : List[Any]=3072 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=512 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : int=0.02 , __SCREAMING_SNAKE_CASE : Dict=1e-12 , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[Any]=1 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : List[Any]=2 , **__SCREAMING_SNAKE_CASE : int , ) -> str:
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : List[str] = vocab_size
a_ : Dict = max_position_embeddings
a_ : int = hidden_size
a_ : List[str] = visual_embedding_dim
a_ : str = num_hidden_layers
a_ : Dict = num_attention_heads
a_ : Dict = intermediate_size
a_ : Optional[int] = hidden_act
a_ : List[str] = hidden_dropout_prob
a_ : Optional[int] = attention_probs_dropout_prob
a_ : Optional[Any] = initializer_range
a_ : int = type_vocab_size
a_ : Union[str, Any] = layer_norm_eps
a_ : List[Any] = bypass_transformer
a_ : Union[str, Any] = special_visual_initialize
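# Hedged usage sketch (upstream this configuration class is published as
# VisualBertConfig):
#
#     config = VisualBertConfig(visual_embedding_dim=512)
#     config.save_pretrained('''./visual_bert_config''')  # folder name is arbitrary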
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
a_ : Union[str, Any] = tempfile.mkdtemp()
a_ : Union[str, Any] = 8
# DPR tok
a_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a_ : str = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
a_ : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : int = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Optional[int] = {'''unk_token''': '''<unk>'''}
a_ : List[str] = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : int = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : str ) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ : List[str] = self.get_dummy_dataset()
a_ : Tuple = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : Tuple = dataset
a_ : Any = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : bool ) -> Dict:
a_ : Dict = self.get_dummy_dataset()
a_ : Dict = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
a_ : Optional[int] = os.path.join(self.tmpdirname , '''dataset''' )
a_ : str = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
a_ : Optional[Any] = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE ) , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
a_ : Optional[int] = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
a_ : Union[str, Any] = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
a_ : Dict = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__SCREAMING_SNAKE_CASE , open(__SCREAMING_SNAKE_CASE , '''wb''' ) )
a_ : Optional[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : Optional[Any] = 1
a_ : Dict = self.get_dummy_canonical_hf_index_retriever()
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : str = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : str = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : List[str] = self.get_dummy_dataset()
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : List[str] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Union[str, Any] = 1
a_ : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
a_ : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : List[str] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
a_ : Union[str, Any] = 1
a_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Tuple = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
a_ : str = 1
a_ : Tuple = self.get_dummy_legacy_index_retriever()
a_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
a_ : List[str] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Optional[Any] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
import torch
a_ : Any = 1
a_ : List[Any] = self.get_dummy_canonical_hf_index_retriever()
a_ : Union[str, Any] = [[5, 7], [10, 11]]
a_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : str = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
a_ , a_ , a_ : List[str] = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
a_ : Any = retriever(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
a_ , a_ , a_ , a_ : str = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : str = self.get_dpr_ctx_encoder_tokenizer()
a_ : Tuple = 1
a_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
retriever.set_ctx_encoder_tokenizer(__SCREAMING_SNAKE_CASE )
a_ : Dict = [[5, 7], [10, 11]]
a_ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : List[Any] = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
        self.assertEqual(
            len(__SCREAMING_SNAKE_CASE ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __SCREAMING_SNAKE_CASE ) # check for doc token related keys in dictionary.
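# Hedged end-to-end sketch (mirrors the dummy setups above; the checkpoint and
# the dummy-dataset flag follow the upstream RAG documentation):
#
#     retriever = RagRetriever.from_pretrained(
#         '''facebook/rag-token-nq''', index_name='''exact''', use_dummy_dataset=True
#     )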
'''simple docstring'''
import json
import sys
def format_json_to_md( input_json_file , output_md_file ):
    with open(input_json_file , encoding='''utf-8''' ) as f:
        results = json.load(f )
    output_md = ['''<details>''', '''<summary>Show updated benchmarks!</summary>''', ''' ''']
    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split('''/''' )[-1]
        output_md.append(f'### Benchmark: {benchmark_file_name}' )
        title = '''| metric |'''
        lines = '''|--------|'''
        value = '''| new / old (diff) |'''
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['''new''']
            old_val = metric_vals.get('''old''' , None )
            dif_val = metric_vals.get('''diff''' , None )
            val_str = f' {new_val:f}' if isinstance(new_val , (int, float) ) else '''None'''
            if old_val is not None:
                val_str += f' / {old_val:f}' if isinstance(old_val , (int, float) ) else '''None'''
            if dif_val is not None:
                val_str += f' ({dif_val:f})' if isinstance(dif_val , (int, float) ) else '''None'''
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append('''</details>''' )
    with open(output_md_file , '''w''' , encoding='''utf-8''' ) as f:
        f.writelines('''\n'''.join(output_md ) )
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
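# Illustrative input/output sketch (added; the file and metric names are made
# up): given a results file containing
#
#     {"benchmark/cache.json": {"load_time": {"new": 1.2, "old": 1.5, "diff": -0.3}}}
#
# the function writes a "### Benchmark: cache.json" section whose table row
# reads "| new / old (diff) | 1.200000 / 1.500000 (-0.300000) |".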
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( number_of_bytes : int , partitions : int ):
    if partitions <= 0:
        raise ValueError('''partitions must be a positive number!''' )
    if partitions > number_of_bytes:
        raise ValueError('''partitions can not be greater than number_of_bytes!''' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}' )
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
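# Worked example (the function above keeps its anonymized name, so it is
# described rather than called here): allocating 10 bytes across 3 partitions
# yields
#
#     ['1-3', '4-6', '7-10']
#
# because bytes_per_partition = 10 // 3 = 3 and the last range is padded out to
# the full byte count.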
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = IFInpaintingSuperResolutionPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
snake_case__ = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[Any]:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 666 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( __A : Tuple , __A : Any ):
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(__A ):
for j in range(__A ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def _UpperCAmelCase ( __A : Dict , __A : Optional[int] ):
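    # dist[i][j] starts as the direct edge weight (inf when no edge) and is
    # tightened by allowing each vertex k in turn as an intermediate hop.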
a_ : Dict = [[float('''inf''' ) for _ in range(__A )] for _ in range(__A )]
for i in range(__A ):
for j in range(__A ):
a_ : Tuple = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__A ):
# looping through rows of graph array
for i in range(__A ):
# looping through columns of graph array
for j in range(__A ):
if (
dist[i][k] != float('''inf''' )
and dist[k][j] != float('''inf''' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
a_ : Tuple = dist[i][k] + dist[k][j]
_print_dist(__A , __A )
return dist, v
if __name__ == "__main__":
__lowerCAmelCase = int(input('Enter number of vertices: '))
__lowerCAmelCase = int(input('Enter number of edges: '))
__lowerCAmelCase = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
__lowerCAmelCase = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
__lowerCAmelCase = int(input('Enter source:'))
__lowerCAmelCase = int(input('Enter destination:'))
__lowerCAmelCase = float(input('Enter weight:'))
__lowerCAmelCase = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
    # # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 666 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
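# The torch-backed classes are only registered in the import structure when
# torch is importable; the module itself is resolved lazily via _LazyModule below.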
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( __A : int ):
if not isinstance(__A , __A ):
raise TypeError('''Input value must be an \'int\' type''' )
a_ : Tuple = 0
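    # Count the right-shifts needed to exhaust the value; that count is the
    # 1-indexed position of the most significant set bit (e.g. 0b101 -> 3).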
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCAmelCase ( __A : List[str] , __A : List[Any] ):
a_ : Any = []
for part_id in partition_order:
a_ : str = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(__A ):
expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : Union[str, Any] = spark.range(1_00 ).repartition(1 )
a_ : Any = Spark(__A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : int = spark.range(10 ).repartition(2 )
a_ : Tuple = [1, 0]
a_ : List[str] = _generate_iterable_examples(__A , __A ) # Reverse the partitions.
a_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , __A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a_ , a_ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(10 ).repartition(1 )
a_ : Tuple = SparkExamplesIterable(__A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__A ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
        a_ : Union[str, Any] = lambda __A : __A.reverse()
a_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [2, 1, 0] )
a_ : str = SparkExamplesIterable(__A ).shuffle_data_sources(__A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[str] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a_ : Dict = SparkExamplesIterable(__A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [0, 2] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a_ : List[Any] = SparkExamplesIterable(__A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [1, 3] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[Any] = spark.range(1_00 ).repartition(1 )
a_ : Optional[Any] = Spark(__A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 666 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = KandinskyVaaPriorPipeline
snake_case__ = ["prompt"]
snake_case__ = ["prompt", "negative_prompt"]
snake_case__ = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
snake_case__ = False
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
return 32
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
return 32
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
return 100
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
a_ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
torch.manual_seed(0 )
a_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
torch.manual_seed(0 )
a_ : str = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
a_ : Dict = PriorTransformer(**__SCREAMING_SNAKE_CASE )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
a_ : Any = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
torch.manual_seed(0 )
a_ : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a_ : Optional[int] = CLIPVisionModelWithProjection(__SCREAMING_SNAKE_CASE )
return model
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
a_ : Optional[int] = CLIPImageProcessor(
crop_size=224 , do_center_crop=__SCREAMING_SNAKE_CASE , do_normalize=__SCREAMING_SNAKE_CASE , do_resize=__SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
return image_processor
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : Dict = self.dummy_prior
a_ : int = self.dummy_image_encoder
a_ : List[str] = self.dummy_text_encoder
a_ : Tuple = self.dummy_tokenizer
a_ : int = self.dummy_image_processor
a_ : Optional[int] = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=__SCREAMING_SNAKE_CASE , clip_sample_range=10.0 , )
a_ : Optional[Any] = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def SCREAMING_SNAKE_CASE ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 ) -> Tuple:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Dict = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : int = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
a_ : str = '''cpu'''
a_ : str = self.get_dummy_components()
a_ : Dict = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
a_ : Dict = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
a_ : Optional[Any] = output.image_embeds
a_ : Optional[int] = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0]
a_ : Any = image[0, -10:]
a_ : List[str] = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a_ : List[Any] = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
a_ : Union[str, Any] = torch_device == '''cpu'''
a_ : Optional[Any] = True
a_ : Dict = False
self._test_inference_batch_single_identical(
test_max_difference=__SCREAMING_SNAKE_CASE , relax_max_difference=__SCREAMING_SNAKE_CASE , test_mean_pixel_difference=__SCREAMING_SNAKE_CASE , )
@skip_mps
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
a_ : str = torch_device == '''cpu'''
a_ : List[Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=__SCREAMING_SNAKE_CASE , test_mean_pixel_difference=__SCREAMING_SNAKE_CASE , )
| 666 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bloom"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=25_0880 , __SCREAMING_SNAKE_CASE : Dict=64 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : int=8 , __SCREAMING_SNAKE_CASE : Any=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : List[str]=False , **__SCREAMING_SNAKE_CASE : str , ) -> Any:
a_ : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
a_ : Any = kwargs.pop('''n_embed''' , __SCREAMING_SNAKE_CASE )
a_ : Optional[int] = hidden_size if n_embed is None else n_embed
a_ : int = n_layer
a_ : str = n_head
a_ : Optional[int] = layer_norm_epsilon
a_ : Dict = initializer_range
a_ : List[str] = use_cache
a_ : Dict = pretraining_tp
a_ : Optional[Any] = apply_residual_connection_post_layernorm
a_ : Optional[Any] = hidden_dropout
a_ : List[str] = attention_dropout
a_ : Dict = bos_token_id
a_ : Optional[int] = eos_token_id
a_ : Any = slow_but_exact
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = version.parse("1.12" )
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" , __SCREAMING_SNAKE_CASE : List[PatchingSpec] = None , __SCREAMING_SNAKE_CASE : bool = False , ) -> Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config , '''pad_token_id''' , __SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
a_ : Tuple = 0
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
a_ : Optional[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' , inverted_values_shape=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
a_ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self._config.n_head
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizer" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
a_ : Dict = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
        # We need to order the inputs in the way they appear in the forward()
a_ : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a_ , a_ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
a_ : str = seqlen + 2
a_ : Any = self._config.hidden_size // self.num_attention_heads
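                # BLOOM fuses batch and head dims in its KV cache: keys are shaped
                # (batch * n_heads, head_dim, seq_len), values (batch * n_heads, seq_len, head_dim).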
a_ : Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
a_ : Any = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
a_ : List[str] = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
a_ : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
a_ : Optional[int] = ordered_inputs['''attention_mask'''].dtype
a_ : List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 13
| 666 | 1 |
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
__lowerCAmelCase = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def _UpperCAmelCase ( __A : str = "dhaka" , __A : int = 5 ):
a_ : Union[str, Any] = min(__A , 50 ) # Prevent abuse!
a_ : Optional[int] = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
a_ : List[str] = requests.get('''https://www.google.com/search''' , params=__A , headers=__A )
a_ : Tuple = BeautifulSoup(html.text , '''html.parser''' )
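    # Google inlines the image-grid metadata as JSON inside an
    # AF_initDataCallback(...) script tag; extract that payload for parsing below.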
a_ : int = ''''''.join(
re.findall(R'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) )
a_ : int = json.dumps(__A )
a_ : List[str] = json.loads(__A )
a_ : Any = re.findall(
R'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , __A , )
if not matched_google_image_data:
return 0
a_ : List[Any] = re.sub(
R'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(__A ) , )
a_ : List[Any] = re.findall(
R'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , __A , )
for index, fixed_full_res_image in enumerate(__A ):
if index >= max_images:
return index
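        # The scraped URLs carry escaped unicode sequences; round-trip them
        # through 'unicode-escape' to recover plain URLs before downloading.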
a_ : List[Any] = bytes(__A , '''ascii''' ).decode(
'''unicode-escape''' )
a_ : List[str] = bytes(__A , '''ascii''' ).decode(
'''unicode-escape''' )
a_ : Dict = urllib.request.build_opener()
a_ : List[str] = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(__A )
a_ : Any = f'query_{query.replace(" " , "_" )}'
if not os.path.exists(__A ):
os.makedirs(__A )
urllib.request.urlretrieve( # noqa: S310
__A , f'{path_name}/original_size_img_{index}.jpg' )
return index
if __name__ == "__main__":
try:
__lowerCAmelCase = download_images_from_google_query(sys.argv[1])
print(F"""{image_count} images were downloaded to disk.""")
except IndexError:
print('Please provide a search term.')
raise
| 666 |
'''simple docstring'''
import sys
__lowerCAmelCase = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _UpperCAmelCase ( __A : str ):
a_ : Tuple = 1
for digit in s:
product *= int(__A )
return product
def _UpperCAmelCase ( __A : str = N ):
a_ : Dict = -sys.maxsize - 1
a_ : Optional[int] = n[:13]
a_ : str = 13
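    # Slide a 13-digit window: shift it by one while the incoming digit is at
    # least the leading digit, otherwise record the window's product and
    # restart the window 13 digits ahead.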
while cur_index < len(__A ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
a_ : Tuple = substr[1:] + n[cur_index]
cur_index += 1
else:
a_ : Dict = max(__A , str_eval(__A ) )
a_ : List[str] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 666 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
a_ : int = tempfile.mkdtemp()
a_ : str = SamImageProcessor()
a_ : Optional[int] = SamProcessor(__SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : List[str] ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ).image_processor
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a_ : List[str] = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE ( self : int ) -> str:
a_ : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a_ : Optional[int] = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
a_ : Dict = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
a_ : List[Any] = self.get_image_processor()
a_ : List[Any] = SamProcessor(image_processor=__SCREAMING_SNAKE_CASE )
a_ : Tuple = self.prepare_image_inputs()
a_ : int = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
a_ : Union[str, Any] = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
a_ : Tuple = self.get_image_processor()
a_ : List[str] = SamProcessor(image_processor=__SCREAMING_SNAKE_CASE )
a_ : Any = [torch.ones((1, 3, 5, 5) )]
a_ : Tuple = [[1764, 2646]]
a_ : List[str] = [[683, 1024]]
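        # post_process_masks rescales the low-res (5x5) masks back up to each
        # image's original (height, width), here 1764x2646.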
a_ : Dict = processor.post_process_masks(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
a_ : int = processor.post_process_masks(
__SCREAMING_SNAKE_CASE , torch.tensor(__SCREAMING_SNAKE_CASE ) , torch.tensor(__SCREAMING_SNAKE_CASE ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
a_ : Dict = [np.ones((1, 3, 5, 5) )]
a_ : int = processor.post_process_masks(__SCREAMING_SNAKE_CASE , np.array(__SCREAMING_SNAKE_CASE ) , np.array(__SCREAMING_SNAKE_CASE ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
a_ : Union[str, Any] = [[1, 0], [0, 1]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
a_ : Dict = processor.post_process_masks(__SCREAMING_SNAKE_CASE , np.array(__SCREAMING_SNAKE_CASE ) , np.array(__SCREAMING_SNAKE_CASE ) )
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
a_ : Any = tempfile.mkdtemp()
a_ : str = SamImageProcessor()
a_ : Optional[Any] = SamProcessor(__SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Dict , **__SCREAMING_SNAKE_CASE : str ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ).image_processor
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
a_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a_ : List[str] = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
a_ : Optional[int] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a_ : Union[str, Any] = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
a_ : Any = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
a_ : str = self.get_image_processor()
a_ : List[str] = SamProcessor(image_processor=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = self.prepare_image_inputs()
a_ : Union[str, Any] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
a_ : str = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
a_ : Optional[int] = self.get_image_processor()
a_ : str = SamProcessor(image_processor=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = [tf.ones((1, 3, 5, 5) )]
a_ : Optional[Any] = [[1764, 2646]]
a_ : Union[str, Any] = [[683, 1024]]
a_ : Optional[int] = processor.post_process_masks(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
a_ : List[Any] = processor.post_process_masks(
__SCREAMING_SNAKE_CASE , tf.convert_to_tensor(__SCREAMING_SNAKE_CASE ) , tf.convert_to_tensor(__SCREAMING_SNAKE_CASE ) , return_tensors='''tf''' , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
a_ : int = [np.ones((1, 3, 5, 5) )]
a_ : int = processor.post_process_masks(
__SCREAMING_SNAKE_CASE , np.array(__SCREAMING_SNAKE_CASE ) , np.array(__SCREAMING_SNAKE_CASE ) , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
a_ : Tuple = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
a_ : Optional[Any] = processor.post_process_masks(
__SCREAMING_SNAKE_CASE , np.array(__SCREAMING_SNAKE_CASE ) , np.array(__SCREAMING_SNAKE_CASE ) , return_tensors='''tf''' )
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : List[Any] = tempfile.mkdtemp()
a_ : Dict = SamImageProcessor()
a_ : List[str] = SamProcessor(__SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , **__SCREAMING_SNAKE_CASE : Tuple ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ).image_processor
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a_ : List[str] = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
a_ : int = self.get_image_processor()
a_ : Tuple = SamProcessor(image_processor=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
a_ : List[str] = [tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )]
a_ : Optional[Any] = [torch.tensor(__SCREAMING_SNAKE_CASE )]
a_ : int = [[1764, 2646]]
a_ : Any = [[683, 1024]]
a_ : Dict = processor.post_process_masks(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
a_ : Tuple = processor.post_process_masks(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
a_ : Optional[Any] = self.get_image_processor()
a_ : Optional[Any] = SamProcessor(image_processor=__SCREAMING_SNAKE_CASE )
a_ : str = self.prepare_image_inputs()
a_ : Dict = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )['''pixel_values'''].numpy()
a_ : List[str] = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )['''pixel_values'''].numpy()
a_ : List[Any] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''tf''' )['''pixel_values'''].numpy()
a_ : Any = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''tf''' )['''pixel_values'''].numpy()
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
| 666 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : list[int] ):
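    # Divide and conquer on the middle three elements: recurse into whichever
    # half is still rising, so only O(log n) entries are inspected.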
a_ : int = len(__A ) // 2
# choose the middle 3 elements
a_ : Dict = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = LongformerTokenizer
snake_case__ = True
snake_case__ = LongformerTokenizerFast
snake_case__ = True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Any = {'''unk_token''': '''<unk>'''}
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Any , **__SCREAMING_SNAKE_CASE : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
a_ : Union[str, Any] = '''lower newer'''
a_ : List[Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a_ : List[str] = '''lower newer'''
a_ : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a_ : Optional[int] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokens + [tokenizer.unk_token]
a_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Dict = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
a_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
a_ : str = self.get_tokenizer()
a_ : int = '''Encode this sequence.'''
a_ : List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
a_ : Optional[Any] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = '''Encode <mask> sequence'''
a_ : List[str] = '''Encode <mask>sequence'''
a_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : str = '''A, <mask> AllenNLP sentence.'''
a_ : List[Any] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
a_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
a_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a_ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Dict = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
a_ : Union[str, Any] = f'{text_of_1_token} {text_of_1_token}'
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Union[str, Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a_ : str = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
| 666 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
a_ : Tuple = 1
a_ : Optional[Any] = 3
a_ : List[Any] = (32, 32)
a_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
return image
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
torch.manual_seed(0 )
a_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
torch.manual_seed(0 )
a_ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
torch.manual_seed(0 )
a_ : Tuple = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
def extract(*__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Any ):
class SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] ) -> int:
a_ : List[str] = torch.ones([0] )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]:
self.pixel_values.to(__SCREAMING_SNAKE_CASE )
return self
return Out()
return extract
    def test_stable_diffusion_img2img_default_case( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device )
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = alt_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=init_image , )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = alt_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=init_image , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
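        # The two asserts above pin the dict and tuple return paths to the same reference slice.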
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_img2img_fp16( self ):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device )
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        image = alt_pipe(
            [prompt] , generator=generator , num_inference_steps=2 , output_type='''np''' , image=init_image , ).images
assert image.shape == (1, 32, 32, 3)
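        # fp16 outputs drift too much across GPUs to pin a reference slice, so only the shape is checked.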
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504) )
        model_id = '''BAAI/AltDiffusion'''
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type='''np''' , )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def tearDown( self ) -> None:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        init_image = init_image.resize((768, 512) )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
        model_id = '''BAAI/AltDiffusion'''
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type='''np''' , )
        image = output.images[0]
assert image.shape == (512, 768, 3)
    # img2img is flaky across GPUs even in fp32, so compare with a loose max-absolute-error tolerance
assert np.abs(expected_image - image ).max() < 1e-2
| 666 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file : str , bos_token : str = "<s>" , eos_token : str = "</s>" , sep_token : str = "</s>" , cls_token : str = "<s>" , unk_token : str = "<unk>" , pad_token : str = "<pad>" , mask_token : str = "<mask>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
a_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
@property
    def vocab_size( self ) -> int:
return len(self.sp_model )
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text : str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token : str ) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index : int ) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens : List[str] ) -> str:
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d : Dict ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
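# Minimal usage sketch (illustrative only - it assumes a trained SentencePiece model file,
# which is not shipped here):
#
#     tokenizer = BarthezTokenizer("sentencepiece.bpe.model")
#     ids = tokenizer.convert_tokens_to_ids(tokenizer._tokenize("Bonjour le monde"))
#     tokenizer.build_inputs_with_special_tokens(ids)  # [cls_id] + ids + [sep_id]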
| 666 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline( self , model , tokenizer , processor ):
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=['''politics''', '''health'''] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test( self , classifier , _ ):
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
        self.assertEqual(outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str )], '''scores''': [ANY(float )]} )
        # No kwarg
        outputs = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
        self.assertEqual(outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str )], '''scores''': [ANY(float )]} )
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
        self.assertEqual(outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str )], '''scores''': [ANY(float )]} )
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
        self.assertEqual(
            outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str ), ANY(str )], '''scores''': [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
        self.assertEqual(
            outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str ), ANY(str )], '''scores''': [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
        outputs = classifier(
            '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
        self.assertEqual(outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str )], '''scores''': [ANY(float )]} )
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''labels''': [ANY(str ), ANY(str )], '''scores''': [ANY(float ), ANY(float )]}
                for i in range(1 )
            ] , )
        outputs = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''labels''': [ANY(str ), ANY(str )], '''scores''': [ANY(float ), ANY(float )]}
                for i in range(2 )
            ] , )
        with self.assertRaises(ValueError ):
            classifier('''''' , candidate_labels='''politics''' )
        with self.assertRaises(TypeError ):
            classifier(None , candidate_labels='''politics''' )
        with self.assertRaises(ValueError ):
            classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
        with self.assertRaises(TypeError ):
            classifier('''Who are you voting for in 2020?''' , candidate_labels=None )
        with self.assertRaises(ValueError ):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
        with self.assertRaises(AttributeError ):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=None , )
        self.run_entailment_id(classifier )
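    # The helper below checks how the pipeline derives the "entailment" logit index from
    # config.label2id: when no label name starts with "entail" it falls back to -1 (the last logit).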
    def run_entailment_id( self , zero_shot_classifier : Pipeline ):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        config.label2id = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
@require_torch
    def test_truncation( self ):
        zero_shot_classifier = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
    def test_small_model_pt( self ):
        zero_shot_classifier = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
    def test_small_model_tf( self ):
        zero_shot_classifier = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
    def test_large_model_pt( self ):
        zero_shot_classifier = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf( self ):
        zero_shot_classifier = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
| 666 |
'''simple docstring'''
def jaro_winkler(stra: str, strb: str) -> float:
    def get_matched_characters(_stra: str, _strb: str) -> str:
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                _strb = f'{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}'
        return "".join(matched)
    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
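    # Hand-checked reference (illustrative, not part of the original file): "martha"/"marhta" is
    # the classic transposition example, with a Jaro-Winkler score of ~0.9611.
    assert abs(jaro_winkler('martha', 'marhta') - 0.9611) < 1e-4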
| 666 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
    parser.add_argument(
        '''--pretrained_model_config''' , type=str , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
    parser.add_argument(
        '''--tokenizer''' , type=str , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
    parser.add_argument(
        '''--per_replica_batch_size''' , type=int , default=8 , help='''Batch size per TPU core.''' , )
    parser.add_argument(
        '''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
    parser.add_argument(
        '''--tpu_name''' , type=str , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
    parser.add_argument(
        '''--tpu_zone''' , type=str , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
    parser.add_argument(
        '''--gcp_project''' , type=str , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
    parser.add_argument(
        '''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
    parser.add_argument(
        '''--train_dataset''' , type=str , help='''Path to training dataset to load. If the path begins with `gs://`'''
        ''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
    parser.add_argument(
        '''--shuffle_buffer_size''' , type=int , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
    parser.add_argument(
        '''--eval_dataset''' , type=str , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
        ''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=1 , help='''Number of epochs to train for.''' , )
    parser.add_argument(
        '''--learning_rate''' , type=float , default=1E-4 , help='''Learning rate to use for training.''' , )
    parser.add_argument(
        '''--weight_decay_rate''' , type=float , default=1E-3 , help='''Weight decay rate to use for training.''' , )
    parser.add_argument(
        '''--max_length''' , type=int , default=512 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
    parser.add_argument(
        '''--mlm_probability''' , type=float , default=0.15 , help='''Fraction of tokens to mask during training.''' , )
    parser.add_argument('''--output_dir''' , type=str , required=True , help='''Path to save model checkpoints to.''' )
    parser.add_argument('''--hub_model_id''' , type=str , help='''Model ID to upload to on the Hugging Face Hub.''' )
    args = parser.parse_args()
    return args
def initialize_tpu(args ):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            '''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
            '''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def count_samples(file_list ):
    num_samples = 0
    for file in file_list:
        file_name = file.split('''/''' )[-1]
        sample_count = re.search(r'''-\d+-(\d+)\.tfrecord''' , file_name ).group(1 )
        sample_count = int(sample_count )
        num_samples += sample_count
    return num_samples
def prepare_dataset(records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(records ) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn , num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=AUTO )
    dataset = dataset.prefetch(AUTO )
    return dataset
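# Note that mask_fn runs inside the tf.data pipeline after batching, so the MLM mask pattern is
# re-drawn every time a batch is produced - i.e. dynamic masking across epochs.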
def main(args ):
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
    if not training_records:
        raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' )
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
    if not eval_records:
        raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' )
    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule = create_optimizer(
            num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer , metrics=['''accuracy'''] )
    def decode_fn(example ):
        features = {
            '''input_ids''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            '''attention_mask''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example , features )
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors='''tf''' )
    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch['''attention_mask'''] , tf.bool )
            | (batch['''input_ids'''] == tokenizer.cls_token_id)
            | (batch['''input_ids'''] == tokenizer.sep_token_id)
        )
        batch['''input_ids'''] , batch['''labels'''] = data_collator.tf_mask_tokens(
            batch['''input_ids'''] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    eval_dataset = prepare_dataset(
        eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
    model.fit(
        train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
main(args)
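# Example invocation (the script name, bucket paths and tokenizer name are illustrative only):
#   python run_mlm.py --tokenizer unigram-tokenizer-wikitext --pretrained_model_config roberta-base \
#       --train_dataset gs://my-bucket/train/ --eval_dataset gs://my-bucket/eval/ \
#       --output_dir gs://my-bucket/model --bfloat16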
| 666 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel ( torch.nn.Module ):
    def __init__( self , pretrained_model_name_or_path='''sayef/fsner-bert-base-uncased''' ):
        super(FSNERModel , self ).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path , return_dict=True )
        self.cos = torch.nn.CosineSimilarity(3 , 1e-08 )
        self.softmax = torch.nn.Softmax(dim=1 )
    def BERT( self , **inputs ):
        return self.bert(**inputs ).last_hidden_state
    def VectorSum( self , token_embeddings ):
        return token_embeddings.sum(2 , keepdim=True )
    def Atten( self , q , S , T=1 ):
        return self.softmax(T * self.cos(q , S ) )
    def forward( self , W_supports , W_query ):
        support_sizes = W_supports['''sizes'''].tolist()
        start_token_id = W_supports['''start_token_id'''].item()
        end_token_id = W_supports['''end_token_id'''].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query )
        S = self.BERT(**W_supports )
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['''input_ids'''] == start_token_id
        end_token_masks = W_supports['''input_ids'''] == end_token_id
        for i, size in enumerate(support_sizes ):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            p_end = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start) )
                p_ends = torch.vstack((p_ends, p_end) )
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
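# In effect, every query token embedding is scored (via dot products) against the support tokens
# tagged as entity starts/ends, yielding per-token start and end probability vectors for few-shot NER.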
| 666 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp( self ):
        super().setUp()
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens ):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.word_shape_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
        self.word_pronunciation_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
            json.dump(word_shape , word_shape_writer , ensure_ascii=False )
        with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
            json.dump(word_pronunciation , word_pronunciation_writer , ensure_ascii=False )
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        tokens = tokenizer.tokenize('''你好[SEP]你是谁''' )
        self.assertListEqual(tokens , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
    def test_chinese( self ):
        tokenizer = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
    def test_basic_tokenizer_lower( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_false( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
    def test_basic_tokenizer_lower_strip_accents_true( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_default( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_no_lower( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_respects_never_split_tokens( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
    def test_wordpiece_tokenizer( self ):
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
    def test_is_whitespace( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text( self ):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
    def test_offsets_with_special_characters( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , '''do_lower_case''' ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def test_change_tokenize_chinese_chars( self ):
        list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
        text_with_chinese_char = ''''''.join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                kwargs['''tokenize_chinese_chars'''] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs['''tokenize_chinese_chars'''] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'##{token}' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        text = tokenizer.encode('''你好''' , add_special_tokens=False )
        text_a = tokenizer.encode('''你是谁''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
    def test_prepare_for_model( self ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                text = '''你好,你是谁'''
                tokens = tokenizer.tokenize(text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens )
                pronunciation_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens )
                prepared_input_dict = tokenizer.prepare_for_model(
                    ids , shape_ids , pronunciation_ids , add_special_tokens=True )
                input_dict = tokenizer.encode_plus(text , add_special_tokens=True )
                self.assertEqual(input_dict , prepared_input_dict )
| 666 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification ( TaskTemplate ):
    task: str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"image": Image()} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    image_column: str = "image"
    label_column: str = "labels"
    def align_with_features( self , features ):
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
@property
    def column_mapping( self ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
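# Minimal usage sketch (label names are illustrative):
#
#     features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = ImageClassification().align_with_features(features)
#     assert task.label_schema["labels"].names == ["cat", "dog"]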
| 666 | 1 |
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 666 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int] , n: int ) -> list[tuple[int, int]]:
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions
def is_complete(board: list[list[int]] ) -> bool:
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper(board: list[list[int]] , pos: tuple[int, int] , curr: int ) -> bool:
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int ) -> list[list[int]]:
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = f'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
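    # Illustrative check (not part of the original file): a 5x5 board admits an open tour, so the
    # returned board must contain every move number 1..25 exactly once.
    board = open_knight_tour(5)
    assert sorted(cell for row in board for cell in row) == list(range(1, 26))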
| 666 | 1 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
LABEL_DIR = ''
IMAGE_DIR = ''
OUTPUT_DIR = ''
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR , IMAGE_DIR )
    print('''Processing...''' )
    new_images, new_annos, paths = update_image_and_anno(img_paths , annos , FLIP_TYPE )
    for index, image in enumerate(new_images ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        file_root = f'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
        cv2.imwrite(f'/{file_root}.jpg' , image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
        print(f'Success {index+1}/{len(new_images )} with {file_name}' )
        annos_list = []
        for anno in new_annos[index]:
            obj = f'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
            annos_list.append(obj )
        with open(f'/{file_root}.txt' , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def get_dataset(label_dir: str , img_dir: str ):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '''*.txt''' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f'{label_name}.jpg' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''' ).split(''' ''' )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
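# Annotations use the YOLO format - "class x_center y_center width height" with coordinates
# normalised to [0, 1] - which is why a horizontal flip below reduces to x_center -> 1 - x_center.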
def update_image_and_anno(img_list: list , anno_list: list , flip_type: int = 1 ):
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cv2.imread(path )
        if flip_type == 1:
            new_img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32 ) -> str:
    assert number_char > 1, '''The number of characters should be greater than 1'''
    letter_code = ascii_lowercase + digits
    return ''''''.join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 666 |
'''simple docstring'''
import warnings
warnings.warn(
'memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 666 | 1 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn'
| 666 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def _UpperCAmelCase ( __A : str , __A : dict ):
a_ : Tuple = BeautifulSoup(requests.get(__A , params=__A ).content , '''html.parser''' )
a_ : List[str] = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
a_ : List[str] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
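    # the third anchor in the result's footer row is the "Cited by N" link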
return anchors[2].get_text()
if __name__ == "__main__":
__lowerCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 666 | 1 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _UpperCAmelCase ( __A : int ):
a_ : Optional[int] = VideoMAEConfig()
set_architecture_configs(__A , __A )
if "finetuned" not in model_name:
a_ : List[Any] = False
if "finetuned" in model_name:
a_ : Optional[int] = '''huggingface/label-files'''
if "kinetics" in model_name:
a_ : Union[str, Any] = 4_00
a_ : Optional[Any] = '''kinetics400-id2label.json'''
elif "ssv2" in model_name:
a_ : Union[str, Any] = 1_74
a_ : List[Any] = '''something-something-v2-id2label.json'''
else:
raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
a_ : Dict = json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) )
a_ : str = {int(__A ): v for k, v in idalabel.items()}
a_ : Optional[int] = idalabel
a_ : int = {v: k for k, v in idalabel.items()}
return config
def _UpperCAmelCase ( __A : List[str] , __A : Optional[Any] ):
if "small" in model_name:
a_ : Optional[Any] = 3_84
a_ : Union[str, Any] = 15_36
a_ : str = 12
a_ : Optional[Any] = 16
a_ : Union[str, Any] = 12
a_ : List[Any] = 3
a_ : List[str] = 1_92
a_ : Optional[int] = 7_68
elif "large" in model_name:
a_ : Optional[int] = 10_24
a_ : Any = 40_96
a_ : List[Any] = 24
a_ : Union[str, Any] = 16
a_ : Dict = 12
a_ : str = 8
a_ : Optional[int] = 5_12
a_ : Any = 20_48
elif "huge" in model_name:
a_ : List[str] = 12_80
a_ : int = 51_20
a_ : List[Any] = 32
a_ : Tuple = 16
a_ : Optional[Any] = 12
a_ : Dict = 8
a_ : Any = 6_40
a_ : Any = 25_60
elif "base" not in model_name:
raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' )
def _UpperCAmelCase ( __A : List[str] ):
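    # translate a single parameter name from the original checkpoint into the HF VideoMAE naming scheme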
if "encoder." in name:
a_ : int = name.replace('''encoder.''' , '''''' )
if "cls_token" in name:
a_ : int = name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' )
if "decoder_pos_embed" in name:
a_ : Union[str, Any] = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
a_ : int = name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
a_ : str = name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
a_ : Optional[Any] = name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' )
if "decoder.blocks" in name:
a_ : int = name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
a_ : int = name.replace('''blocks''' , '''videomae.encoder.layer''' )
if "attn.proj" in name:
a_ : Union[str, Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "bias" not in name:
a_ : Any = name.replace('''attn''' , '''attention.self''' )
if "attn" in name:
a_ : Dict = name.replace('''attn''' , '''attention.attention''' )
if "norm1" in name:
a_ : Tuple = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
a_ : Optional[int] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
a_ : Any = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
a_ : Union[str, Any] = name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
a_ : List[str] = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
a_ : List[Any] = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
a_ : Dict = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
a_ : Union[str, Any] = name.replace('''norm.weight''' , '''videomae.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
a_ : List[Any] = name.replace('''norm.bias''' , '''videomae.layernorm.bias''' )
if "head" in name and "decoder" not in name:
a_ : Optional[int] = name.replace('''head''' , '''classifier''' )
return name
def _UpperCAmelCase ( __A : Tuple , __A : Any ):
for key in orig_state_dict.copy().keys():
a_ : int = orig_state_dict.pop(__A )
if key.startswith('''encoder.''' ):
a_ : Optional[int] = key.replace('''encoder.''' , '''''' )
if "qkv" in key:
a_ : Optional[Any] = key.split('''.''' )
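            # the original checkpoint fuses query/key/value into one matrix; split it into equal thirds below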
if key.startswith('''decoder.blocks''' ):
a_ : Any = config.decoder_hidden_size
a_ : int = int(key_split[2] )
a_ : List[Any] = '''decoder.decoder_layers.'''
if "weight" in key:
a_ : Optional[Any] = val[:dim, :]
a_ : Union[str, Any] = val[dim : dim * 2, :]
a_ : Union[str, Any] = val[-dim:, :]
else:
a_ : str = config.hidden_size
a_ : Optional[int] = int(key_split[1] )
a_ : Tuple = '''videomae.encoder.layer.'''
if "weight" in key:
a_ : Dict = val[:dim, :]
a_ : str = val[dim : dim * 2, :]
a_ : Any = val[-dim:, :]
else:
a_ : List[str] = val
return orig_state_dict
def _UpperCAmelCase ( ):
a_ : Union[str, Any] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
a_ : List[Any] = np.load(__A )
return list(__A )
def _UpperCAmelCase ( __A : int , __A : Optional[Any] , __A : Union[str, Any] , __A : int ):
a_ : Union[str, Any] = get_videomae_config(__A )
if "finetuned" in model_name:
a_ : Union[str, Any] = VideoMAEForVideoClassification(__A )
else:
a_ : Any = VideoMAEForPreTraining(__A )
# download original checkpoint, hosted on Google Drive
a_ : Optional[int] = '''pytorch_model.bin'''
gdown.cached_download(__A , __A , quiet=__A )
a_ : int = torch.load(__A , map_location='''cpu''' )
if "model" in files:
a_ : Dict = files['''model''']
else:
a_ : str = files['''module''']
a_ : Optional[Any] = convert_state_dict(__A , __A )
model.load_state_dict(__A )
model.eval()
# verify model on basic input
a_ : Optional[int] = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
a_ : Union[str, Any] = prepare_video()
a_ : Tuple = image_processor(__A , return_tensors='''pt''' )
if "finetuned" not in model_name:
a_ : str = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
a_ : str = torch.load(__A )
a_ : Any = model(**__A )
a_ : int = outputs.logits
a_ : Any = [
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
a_ : List[str] = torch.Size([1, 4_00] )
a_ : str = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
a_ : Optional[Any] = torch.Size([1, 1_74] )
a_ : Optional[Any] = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
a_ : List[str] = torch.Size([1, 14_08, 15_36] )
a_ : Any = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
a_ : List[Any] = torch.Size([1, 14_08, 15_36] )
a_ : Tuple = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
a_ : List[Any] = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
a_ : Optional[int] = torch.Size([1, 14_08, 15_36] )
a_ : List[Any] = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
a_ : Optional[int] = torch.Size([1, 4_00] )
a_ : Any = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
a_ : List[Any] = torch.Size([1, 4_00] )
a_ : List[Any] = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
a_ : Any = torch.Size([1, 4_00] )
a_ : Optional[int] = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
a_ : Optional[Any] = torch.Size([1, 4_00] )
a_ : List[Any] = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
a_ : List[str] = torch.Size([1, 14_08, 15_36] )
a_ : str = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
a_ : Optional[Any] = torch.Size([1, 1_74] )
a_ : Union[str, Any] = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
a_ : List[str] = torch.Size([1, 14_08, 15_36] )
a_ : Union[str, Any] = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
a_ : Union[str, Any] = torch.Size([1, 1_74] )
a_ : Dict = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(f'Model name not supported. Should be one of {model_names}' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __A , atol=1E-4 )
else:
print('''Logits:''' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __A , atol=1E-4 )
print('''Logits ok!''' )
# verify loss, if applicable
if model_name == "videomae-base-short":
a_ : int = outputs.loss
assert torch.allclose(__A , __A , atol=1E-4 )
print('''Loss ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__A )
model.save_pretrained(__A )
if push_to_hub:
print('''Pushing to the hub...''' )
model.push_to_hub(__A , organization='''nielsr''' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 666 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__lowerCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def _UpperCAmelCase ( __A : str , __A : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
a_ : Tuple = XLMProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Optional[Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
else:
a_ : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Any = ProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
a_ : str = ['''key_proj''', '''value_proj''', '''query_proj''']
a_ : Tuple = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
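    # resolve each key reported missing by from_pretrained attribute by attribute and copy the old weight over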
for key in loading_info["missing_keys"]:
a_ : List[str] = key.split('''.''' )
if attributes[0] == "lm_head":
a_ : List[str] = prophet
a_ : Dict = prophet_old
else:
a_ : str = prophet.prophetnet
a_ : int = prophet_old.model
a_ : str = False
for attribute in attributes:
if attribute in mapping:
a_ : Dict = mapping[attribute]
if not hasattr(__A , __A ) and len(__A ) > 0:
a_ : List[str] = attribute
elif hasattr(__A , __A ):
a_ : Union[str, Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
a_ : Tuple = old_model.weight
logger.info(f'{attribute} is initialized.' )
a_ : Union[str, Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
a_ : Union[str, Any] = old_model.bias
logger.info(f'{attribute} is initialized' )
a_ : Dict = True
break
elif attribute in special_keys and hasattr(__A , '''in_proj_weight''' ):
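                # the old model stores q/k/v as a single in_proj matrix; slice out the relevant third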
a_ : Tuple = old_model.in_proj_weight.shape[0] // 3
a_ : Any = getattr(__A , __A )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
a_ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
a_ : Optional[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
a_ : List[Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
a_ : Optional[int] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
a_ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
a_ : Any = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
a_ : Dict = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
a_ : Union[str, Any] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
a_ : Optional[Any] = True
break
if attribute.isdigit():
a_ : Union[str, Any] = model[int(__A )]
a_ : str = old_model[int(__A )]
else:
a_ : Tuple = getattr(__A , __A )
if old_attribute == "":
a_ : List[str] = old_model
else:
if not hasattr(__A , __A ):
raise ValueError(f'{old_model} does not have {old_attribute}' )
a_ : Optional[Any] = getattr(__A , __A )
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!' )
print(f'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(__A )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 666 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 666 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__lowerCAmelCase = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
__lowerCAmelCase = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Dict=False , ) -> str:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
a_ : Optional[Any] = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in predictions] )
a_ : int = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in references] )
else:
a_ : List[str] = np.asarray(__SCREAMING_SNAKE_CASE )
a_ : Any = np.asarray(__SCREAMING_SNAKE_CASE )
if ignore_case:
a_ : List[str] = np.char.lower(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.char.lower(__SCREAMING_SNAKE_CASE )
if ignore_punctuation:
a_ : Any = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
a_ : Union[str, Any] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : int = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
if ignore_numbers:
a_ : int = string.digits.maketrans('''''' , '''''' , string.digits )
a_ : Optional[int] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : Dict = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
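        # element-wise string equality; the mean of the boolean mask is the match rate in percent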
a_ : Optional[Any] = predictions == references
return {"exact_match": np.mean(__SCREAMING_SNAKE_CASE ) * 100}
| 666 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bridgetower_vision_model"
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]=768 , __SCREAMING_SNAKE_CASE : Tuple=12 , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : List[str]=288 , __SCREAMING_SNAKE_CASE : Dict=1 , __SCREAMING_SNAKE_CASE : Tuple=1e-05 , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> int:
super().__init__(**__SCREAMING_SNAKE_CASE )
a_ : str = hidden_size
a_ : int = num_hidden_layers
a_ : Optional[Any] = num_channels
a_ : List[Any] = patch_size
a_ : int = image_size
a_ : Optional[Any] = initializer_factor
a_ : Optional[int] = layer_norm_eps
a_ : Optional[int] = stop_gradient
a_ : Optional[int] = share_layernorm
a_ : int = remove_last_layer
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[str] , __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **__SCREAMING_SNAKE_CASE : str ) -> "PretrainedConfig":
a_ , a_ : Tuple = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if config_dict.get('''model_type''' ) == "bridgetower":
        a_ : str = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bridgetower_text_model"
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple=5_0265 , __SCREAMING_SNAKE_CASE : Any=768 , __SCREAMING_SNAKE_CASE : Optional[Any]=12 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Optional[int]=3072 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=514 , __SCREAMING_SNAKE_CASE : str=1 , __SCREAMING_SNAKE_CASE : Optional[Any]=1e-05 , __SCREAMING_SNAKE_CASE : Tuple=1 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : Optional[Any]="absolute" , __SCREAMING_SNAKE_CASE : Optional[Any]=True , **__SCREAMING_SNAKE_CASE : str , ) -> Optional[Any]:
super().__init__(**__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = vocab_size
a_ : List[Any] = hidden_size
a_ : int = num_hidden_layers
a_ : Dict = num_attention_heads
a_ : Union[str, Any] = hidden_act
a_ : Union[str, Any] = initializer_factor
a_ : Tuple = intermediate_size
a_ : Tuple = hidden_dropout_prob
a_ : Optional[int] = attention_probs_dropout_prob
a_ : int = max_position_embeddings
a_ : int = type_vocab_size
a_ : Union[str, Any] = layer_norm_eps
a_ : Optional[int] = position_embedding_type
a_ : Union[str, Any] = use_cache
a_ : Optional[int] = pad_token_id
a_ : Optional[Any] = bos_token_id
a_ : int = eos_token_id
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> "PretrainedConfig":
a_ , a_ : Optional[int] = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if config_dict.get('''model_type''' ) == "bridgetower":
a_ : Dict = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bridgetower"
def __init__( self : int , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[int]="gelu" , __SCREAMING_SNAKE_CASE : Union[str, Any]=768 , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : List[Any]=1e-05 , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Union[str, Any]="add" , __SCREAMING_SNAKE_CASE : List[Any]=12 , __SCREAMING_SNAKE_CASE : Optional[Any]=6 , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : int , ) -> Dict:
# TODO: remove this once the Hub files are updated.
a_ : List[Any] = kwargs.pop('''text_config_dict''' , __SCREAMING_SNAKE_CASE )
a_ : Dict = kwargs.pop('''vision_config_dict''' , __SCREAMING_SNAKE_CASE )
super().__init__(**__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = share_cross_modal_transformer_layers
a_ : Optional[int] = hidden_act
a_ : Optional[int] = hidden_size
a_ : Tuple = initializer_factor
a_ : Dict = layer_norm_eps
a_ : str = share_link_tower_layers
a_ : List[Any] = link_tower_type
a_ : Dict = num_attention_heads
a_ : List[str] = num_hidden_layers
a_ : Optional[Any] = tie_word_embeddings
a_ : Tuple = init_layernorm_from_vision_encoder
if text_config is None:
a_ : Any = {}
logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''' )
if vision_config is None:
a_ : Optional[Any] = {}
logger.info('''`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.''' )
a_ : int = BridgeTowerTextConfig(**__SCREAMING_SNAKE_CASE )
a_ : int = BridgeTowerVisionConfig(**__SCREAMING_SNAKE_CASE )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict , __SCREAMING_SNAKE_CASE : BridgeTowerTextConfig , __SCREAMING_SNAKE_CASE : BridgeTowerVisionConfig , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
a_ : Optional[Any] = copy.deepcopy(self.__dict__ )
a_ : List[str] = self.text_config.to_dict()
a_ : Optional[int] = self.vision_config.to_dict()
a_ : Optional[int] = self.__class__.model_type
return output
| 666 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
torch.manual_seed(0 )
a_ : Tuple = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
torch.manual_seed(0 )
a_ : Any = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
a_ : List[Any] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
a_ : List[Any] = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a_ : Union[str, Any] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
a_ : Any = DDPMScheduler()
a_ : str = AudioDiffusionPipeline(vqvae=__SCREAMING_SNAKE_CASE , unet=self.dummy_unet , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 )
a_ : List[Any] = output.audios[0]
a_ : Dict = output.images[0]
a_ : Dict = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : Optional[Any] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : str = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
a_ : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
a_ : int = DDIMScheduler()
a_ : Dict = self.dummy_vqvae_and_unet
a_ : List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : Any = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : List[str] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
a_ : int = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : int = pipe(raw_audio=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , start_step=5 , steps=10 )
a_ : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
a_ : Optional[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
a_ : List[str] = self.dummy_unet_condition
a_ : Dict = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__SCREAMING_SNAKE_CASE , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : int = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : Any = torch.rand((1, 1, 10) )
a_ : Tuple = pipe(generator=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.images[0]
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
a_ : Any = torch_device
a_ : Optional[int] = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
a_ : Dict = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.audios[0]
a_ : Tuple = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
a_ : str = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : Tuple = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 666 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def _UpperCAmelCase ( __A : float , __A : int ):
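    # falling product u * (u - 1) * ... * (u - p + 1) used by Newton's forward-difference formula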
a_ : Any = u
for i in range(1 , __A ):
a_ : int = temp * (u - i)
return temp
def _UpperCAmelCase ( ):
    a_ : Tuple = int(input('''enter the number of values: ''' ) )
a_ : list[list[float]] = []
for _ in range(__A ):
y.append([] )
for i in range(__A ):
for j in range(__A ):
y[i].append(__A )
a_ : List[str] = 0
print('''enter the values of parameters in a list: ''' )
a_ : List[Any] = list(map(__A , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(__A ):
a_ : Optional[Any] = float(input() )
a_ : Union[str, Any] = int(input('''enter the value to interpolate: ''' ) )
a_ : Any = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , __A ):
for j in range(n - i ):
a_ : Optional[Any] = y[j + 1][i - 1] - y[j][i - 1]
a_ : Optional[int] = y[0][0]
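    # accumulate the Newton series: y0 + u*Δy0/1! + u(u-1)*Δ²y0/2! + ...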
for i in range(1 , __A ):
summ += (ucal(__A , __A ) * y[0][i]) / math.factorial(__A )
print(f'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
| 666 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def _UpperCAmelCase ( __A : Union[str, Any] ):
a_ : Tuple = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
a_ : List[Any] = DetaConfig(
backbone_config=__A , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=__A , with_box_refine=__A , two_stage=__A , )
# set labels
a_ : Optional[Any] = '''huggingface/label-files'''
if "o365" in model_name:
a_ : Optional[Any] = 3_66
a_ : Tuple = '''object365-id2label.json'''
else:
a_ : Any = 91
a_ : Union[str, Any] = '''coco-detection-id2label.json'''
a_ : Tuple = num_labels
a_ : str = json.load(open(cached_download(hf_hub_url(__A , __A , repo_type='''dataset''' ) ) , '''r''' ) )
a_ : Optional[int] = {int(__A ): v for k, v in idalabel.items()}
a_ : int = idalabel
a_ : Dict = {v: k for k, v in idalabel.items()}
return config
def _UpperCAmelCase ( __A : List[str] ):
a_ : Tuple = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def _UpperCAmelCase ( __A : str , __A : int , __A : Tuple ):
a_ : str = dct.pop(__A )
a_ : Dict = val
def _UpperCAmelCase ( __A : List[str] , __A : Optional[int] ):
a_ : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
a_ : Tuple = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
a_ : List[str] = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
a_ : str = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
a_ : Optional[Any] = in_proj_weight[:dim, :]
a_ : List[Any] = in_proj_bias[: dim]
a_ : Optional[Any] = in_proj_weight[
dim : dim * 2, :
]
a_ : Union[str, Any] = in_proj_bias[
dim : dim * 2
]
a_ : Optional[int] = in_proj_weight[
-dim :, :
]
a_ : int = in_proj_bias[-dim :]
# fmt: on
def _UpperCAmelCase ( __A : Dict , __A : Dict ):
# transformer decoder self-attention layers
a_ : Any = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
a_ : int = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
a_ : Any = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
a_ : Dict = in_proj_weight[:hidden_size, :]
a_ : Tuple = in_proj_bias[:hidden_size]
a_ : Any = in_proj_weight[
hidden_size : hidden_size * 2, :
]
a_ : Tuple = in_proj_bias[hidden_size : hidden_size * 2]
a_ : Optional[int] = in_proj_weight[-hidden_size:, :]
a_ : int = in_proj_bias[-hidden_size:]
def _UpperCAmelCase ( ):
a_ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a_ : List[str] = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def _UpperCAmelCase ( __A : int , __A : int , __A : Any ):
a_ : Union[str, Any] = get_deta_config(__A )
# load original state dict
if model_name == "deta-swin-large":
a_ : Optional[Any] = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
elif model_name == "deta-swin-large-o365":
a_ : List[str] = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
else:
raise ValueError(f'Model name {model_name} not supported' )
a_ : List[Any] = torch.load(__A , map_location='''cpu''' )['''model''']
# original state dict
for name, param in state_dict.items():
print(__A , param.shape )
# rename keys
a_ : Union[str, Any] = create_rename_keys(__A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_swin_q_k_v(__A , config.backbone_config )
read_in_decoder_q_k_v(__A , __A )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
a_ : Optional[Any] = state_dict.pop(__A )
a_ : int = val
if "input_proj" in key:
a_ : str = state_dict.pop(__A )
a_ : Optional[Any] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
a_ : List[str] = state_dict.pop(__A )
a_ : List[Any] = val
# finally, create HuggingFace model and load state dict
a_ : Dict = DetaForObjectDetection(__A )
model.load_state_dict(__A )
model.eval()
a_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
model.to(__A )
# load image processor
a_ : List[Any] = DetaImageProcessor(format='''coco_detection''' )
# verify our conversion on image
a_ : Dict = prepare_img()
a_ : Optional[int] = processor(images=__A , return_tensors='''pt''' )
a_ : Any = encoding['''pixel_values''']
a_ : int = model(pixel_values.to(__A ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
a_ : Optional[int] = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
a_ : Tuple = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
a_ : Union[str, Any] = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
a_ : Any = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__A ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__A ) , atol=1E-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(__A ).mkdir(exist_ok=__A )
model.save_pretrained(__A )
processor.save_pretrained(__A )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 666 | 1 |
'''simple docstring'''
from typing import Any
class SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Tuple:
a_ : Union[str, Any] = data
a_ : Any = None
class SCREAMING_SNAKE_CASE :
def __init__( self : Dict ) -> Tuple:
a_ : str = None
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = self.head
while temp is not None:
print(temp.data , end=''' ''' )
a_ : Optional[int] = temp.next
print()
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Any ) -> List[Any]:
a_ : Optional[int] = Node(__SCREAMING_SNAKE_CASE )
a_ : List[str] = self.head
a_ : int = new_node
def SCREAMING_SNAKE_CASE ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]:
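        # swap the two payloads in place; the node links themselves stay untouched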
if node_data_a == node_data_a:
return
else:
a_ : str = self.head
while node_a is not None and node_a.data != node_data_a:
a_ : Optional[Any] = node_a.next
a_ : Union[str, Any] = self.head
while node_a is not None and node_a.data != node_data_a:
a_ : Tuple = node_a.next
if node_a is None or node_a is None:
return
a_ , a_ : List[Any] = node_a.data, node_a.data
if __name__ == "__main__":
__lowerCAmelCase = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
| 666 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = DDIMPipeline
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
torch.manual_seed(0 )
a_ : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
a_ : str = DDIMScheduler()
a_ : Union[str, Any] = {'''unet''': unet, '''scheduler''': scheduler}
return components
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple=0 ) -> str:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Dict = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : Union[str, Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu'''
a_ : List[Any] = self.get_dummy_components()
a_ : List[str] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = pipe(**__SCREAMING_SNAKE_CASE ).images
a_ : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
a_ : int = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
a_ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
a_ : Optional[Any] = '''google/ddpm-cifar10-32'''
a_ : Optional[Any] = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Dict = DDIMScheduler()
a_ : List[str] = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddim.to(__SCREAMING_SNAKE_CASE )
ddim.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : Tuple = ddim(generator=__SCREAMING_SNAKE_CASE , eta=0.0 , output_type='''numpy''' ).images
a_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ : List[str] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : int = '''google/ddpm-ema-bedroom-256'''
a_ : str = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Tuple = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddpm.to(__SCREAMING_SNAKE_CASE )
ddpm.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : List[Any] = ddpm(generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
a_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
a_ : Optional[Any] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
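# Illustrative sketch (not part of the test suite): the slow tests above mirror the
# standard end-user flow. Assuming network access and the public
# "google/ddpm-cifar10-32" checkpoint, a minimal unconditional generation run is:
import torch
from diffusers import DDIMPipeline


def sample_ddim_cifar10() -> None:  # hypothetical helper, shown for illustration
    pipe = DDIMPipeline.from_pretrained('google/ddpm-cifar10-32')
    pipe.to('cuda' if torch.cuda.is_available() else 'cpu')
    generator = torch.manual_seed(0)
    # eta=0.0 selects the deterministic DDIM sampler exercised in the tests above.
    image = pipe(generator=generator, num_inference_steps=50, eta=0.0, output_type='pil').images[0]
    image.save('ddim_sample.png')  # hypothetical output path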
| 666 | 1 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
a_ : Union[str, Any] = tempfile.mkdtemp()
a_ : Union[str, Any] = 8
# DPR tok
a_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a_ : str = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
a_ : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : int = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Optional[int] = {'''unk_token''': '''<unk>'''}
a_ : List[str] = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : int = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : str ) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ : List[str] = self.get_dummy_dataset()
a_ : Tuple = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : Tuple = dataset
a_ : Any = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : bool ) -> Dict:
a_ : Dict = self.get_dummy_dataset()
a_ : Dict = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
a_ : Optional[int] = os.path.join(self.tmpdirname , '''dataset''' )
a_ : str = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
a_ : Optional[Any] = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE ) , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
a_ : Optional[int] = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
a_ : Union[str, Any] = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
a_ : Dict = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__SCREAMING_SNAKE_CASE , open(__SCREAMING_SNAKE_CASE , '''wb''' ) )
a_ : Optional[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : Optional[Any] = 1
a_ : Dict = self.get_dummy_canonical_hf_index_retriever()
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : str = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : str = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : List[str] = self.get_dummy_dataset()
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : List[str] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Union[str, Any] = 1
a_ : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
a_ : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : List[str] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
a_ : Union[str, Any] = 1
a_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Tuple = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
a_ : str = 1
a_ : Tuple = self.get_dummy_legacy_index_retriever()
a_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
a_ : List[str] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Optional[Any] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
import torch
a_ : Any = 1
a_ : List[Any] = self.get_dummy_canonical_hf_index_retriever()
a_ : Union[str, Any] = [[5, 7], [10, 11]]
a_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : str = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
a_ , a_ , a_ : List[str] = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
a_ : Any = retriever(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
a_ , a_ , a_ , a_ : str = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : str = self.get_dpr_ctx_encoder_tokenizer()
a_ : Tuple = 1
a_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
retriever.set_ctx_encoder_tokenizer(__SCREAMING_SNAKE_CASE )
a_ : Dict = [[5, 7], [10, 11]]
a_ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : List[Any] = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
        self.assertEqual(
            len(out ) , 6 ) # check that the retriever output consists of 6 attributes, including the tokenized docs
        self.assertTrue(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) ) # check that the doc-token keys are present in the output dict
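# Illustrative sketch (not part of the test suite): the retriever tests above hinge on
# maximum-inner-product search over a FAISS-indexed `datasets.Dataset`. A standalone
# version of that core lookup, on the same dummy embeddings, looks like this
# (`demo_mips_lookup` is a hypothetical helper name):
import numpy as np
from datasets import Dataset


def demo_mips_lookup(vector_size: int = 8) -> None:
    import faiss  # requires the faiss package, as in the tests above

    ds = Dataset.from_dict(
        {
            'id': ['0', '1'],
            'text': ['foo', 'bar'],
            'embeddings': [np.ones(vector_size), 2 * np.ones(vector_size)],
        }
    )
    ds.add_faiss_index('embeddings', string_factory='Flat', metric_type=faiss.METRIC_INNER_PRODUCT)
    query = np.ones(vector_size, dtype=np.float32)
    scores, examples = ds.get_nearest_examples('embeddings', query, k=1)
    # The all-twos document maximizes the inner product, matching the assertions above.
    assert examples['id'] == ['1']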
| 666 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = 42
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="Translation" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def __call__( self : Dict ) -> Tuple:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = None
snake_case__ = None
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="TranslationVariableLanguages" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : List[str] = sorted(set(self.languages ) ) if self.languages else None
a_ : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self : Any ) -> Optional[Any]:
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , translation_dict : dict ) -> dict:
        lang_set = set(self.languages ) if self.languages else set()
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({", ".join(sorted(lang_set ) )}).' )
        # Convert the dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
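# Illustrative sketch (not part of the original module): the encode step above flattens
# a per-language dict, where a language may map to one string or to a list of strings,
# into parallel language-sorted tuples. A standalone reproduction of that logic, with
# `flatten_translations` as a hypothetical helper name:
def flatten_translations(translation_dict: dict) -> dict:
    pairs = []
    for lang, text in translation_dict.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            pairs.extend((lang, el) for el in text)
    languages, translations = zip(*sorted(pairs))
    return {'language': languages, 'translation': translations}


# flatten_translations({'en': 'the cat', 'fr': ['le chat', 'la chatte']})
# -> {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}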
| 666 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any=13 , __SCREAMING_SNAKE_CASE : Union[str, Any]=30 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : List[str]=32 , __SCREAMING_SNAKE_CASE : Union[str, Any]=5 , __SCREAMING_SNAKE_CASE : Any=4 , __SCREAMING_SNAKE_CASE : Tuple=37 , __SCREAMING_SNAKE_CASE : str="gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=10 , __SCREAMING_SNAKE_CASE : str=0.02 , ) -> Optional[Any]:
a_ : Optional[Any] = parent
a_ : Optional[int] = batch_size
a_ : int = image_size
a_ : int = patch_size
a_ : Optional[Any] = num_channels
a_ : Any = is_training
a_ : Union[str, Any] = use_labels
a_ : List[Any] = hidden_size
a_ : Optional[int] = num_hidden_layers
a_ : Union[str, Any] = num_attention_heads
a_ : List[str] = intermediate_size
a_ : Dict = hidden_act
a_ : Optional[Any] = hidden_dropout_prob
a_ : Optional[int] = attention_probs_dropout_prob
a_ : List[str] = type_sequence_label_size
a_ : Union[str, Any] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a_ : Tuple = (image_size // patch_size) ** 2
a_ : Optional[Any] = num_patches + 1
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
a_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ : Any = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, pixel_values
def SCREAMING_SNAKE_CASE ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]:
a_ : Tuple = FlaxViTModel(config=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
a_ : str = (self.image_size, self.image_size)
a_ : Union[str, Any] = (self.patch_size, self.patch_size)
a_ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
a_ : Dict = self.type_sequence_label_size
a_ : Tuple = FlaxViTForImageClassification(config=__SCREAMING_SNAKE_CASE )
a_ : List[str] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a_ : int = 1
a_ : Optional[Any] = FlaxViTForImageClassification(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a_ : Optional[int] = model(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE ( self : str ) -> None:
a_ : List[Any] = FlaxViTModelTester(self )
a_ : List[str] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
a_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
a_ , a_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Dict = model_class(__SCREAMING_SNAKE_CASE )
a_ : Any = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : Optional[int] = [*signature.parameters.keys()]
a_ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
a_ , a_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
a_ : str = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : int = model_class(__SCREAMING_SNAKE_CASE )
@jax.jit
def model_jitted(__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : int ):
return model(pixel_values=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
with self.subTest('''JIT Enabled''' ):
a_ : int = model_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
a_ : Optional[int] = model_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
for model_class_name in self.all_model_classes:
a_ : Union[str, Any] = model_class_name.from_pretrained('''google/vit-base-patch16-224''' )
a_ : Optional[int] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
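# Illustrative sketch (not part of the test suite): the sequence-length bookkeeping
# the tester relies on — number of patches plus one [CLS] token — is easy to verify
# by hand (`vit_sequence_length` is a hypothetical helper name):
def vit_sequence_length(image_size: int, patch_size: int) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # +1 for the [CLS] token


assert vit_sequence_length(30, 2) == 226    # the tester's defaults above
assert vit_sequence_length(224, 16) == 197  # google/vit-base-patch16-224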
| 666 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
        'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string: str) -> bool:
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f'could not parse string as bool {string}')
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
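    # Illustrative sketch (not part of the script): the same conversion can be driven
    # programmatically; the checkpoint/config paths below are hypothetical placeholders.
    #
    #   from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    #       download_controlnet_from_original_ckpt,
    #   )
    #
    #   controlnet = download_controlnet_from_original_ckpt(
    #       checkpoint_path='control_sd15_canny.pth',   # hypothetical path
    #       original_config_file='cldm_v15.yaml',       # hypothetical path
    #       image_size=512,
    #       extract_ema=False,
    #   )
    #   controlnet.save_pretrained('converted-controlnet', safe_serialization=True)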
| 666 |
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('''surface_area_cube() only accepts non-negative values''')
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('''surface_area_cuboid() only accepts non-negative values''')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('''surface_area_sphere() only accepts non-negative values''')
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    # Curved surface (2*pi*r^2) plus the flat circular base (pi*r^2).
    if radius < 0:
        raise ValueError('''surface_area_hemisphere() only accepts non-negative values''')
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('''surface_area_cone() only accepts non-negative values''')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            '''surface_area_conical_frustum() only accepts non-negative values''')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('''surface_area_cylinder() only accepts non-negative values''')
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('''surface_area_torus() only accepts non-negative values''')
    if torus_radius < tube_radius:
        raise ValueError(
            '''surface_area_torus() does not support spindle or self intersecting tori''')
    # By Pappus's theorem: circumference of the tube cross-section (2*pi*r)
    # times the distance travelled by its centroid (2*pi*R).
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError('''area_rectangle() only accepts non-negative values''')
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('''area_square() only accepts non-negative values''')
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('''area_triangle() only accepts non-negative values''')
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('''area_triangle_three_sides() only accepts non-negative values''')
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('''Given three sides do not form a triangle''')
    # Heron's formula.
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('''area_parallelogram() only accepts non-negative values''')
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('''area_trapezium() only accepts non-negative values''')
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError('''area_circle() only accepts non-negative values''')
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError('''area_ellipse() only accepts non-negative values''')
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('''area_rhombus() only accepts non-negative values''')
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            '''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''')
    elif length < 0:
        raise ValueError(
            '''area_reg_polygon() only accepts non-negative values as \
length of a side''')
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 666 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
a_ : Dict = tempfile.mkdtemp()
# fmt: off
a_ : Tuple = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
a_ : Optional[int] = dict(zip(A__ , range(len(A__ ) ) ) )
a_ : Optional[Any] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
a_ : int = {'''unk_token''': '''<unk>'''}
a_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A__ ) )
a_ : Any = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
a_ : Any = os.path.join(self.tmpdirname , A__ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A__ , A__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : Dict ) -> Any:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A__ )
def SCREAMING_SNAKE_CASE ( self : Dict , **__SCREAMING_SNAKE_CASE : int ) -> str:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **A__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
a_ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a_ : Optional[int] = [Image.fromarray(np.moveaxis(A__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
a_ : Union[str, Any] = self.get_tokenizer()
a_ : str = self.get_rust_tokenizer()
a_ : Dict = self.get_image_processor()
a_ : Any = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
processor_slow.save_pretrained(self.tmpdirname )
a_ : List[str] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=A__ )
a_ : Dict = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
processor_fast.save_pretrained(self.tmpdirname )
a_ : List[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A__ )
self.assertIsInstance(processor_fast.tokenizer , A__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A__ )
self.assertIsInstance(processor_fast.image_processor , A__ )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
a_ : int = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a_ : str = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
a_ : Any = self.get_image_processor(do_normalize=A__ , padding_value=1.0 )
a_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
a_ : int = self.get_image_processor()
a_ : Any = self.get_tokenizer()
a_ : Optional[int] = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
a_ : List[str] = self.prepare_image_inputs()
a_ : Any = image_processor(A__ , return_tensors='''np''' )
a_ : Optional[Any] = processor(images=A__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : List[str] = self.get_image_processor()
a_ : Optional[int] = self.get_tokenizer()
a_ : List[str] = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
a_ : Union[str, Any] = '''lower newer'''
a_ : Optional[int] = processor(text=A__ )
a_ : str = tokenizer(A__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
a_ : List[str] = self.get_image_processor()
a_ : List[Any] = self.get_tokenizer()
a_ : List[str] = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
a_ : Optional[Any] = '''lower newer'''
a_ : Union[str, Any] = self.prepare_image_inputs()
a_ : List[str] = processor(text=A__ , images=A__ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A__ ):
processor()
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
a_ : str = self.get_image_processor()
a_ : Dict = self.get_tokenizer()
a_ : Dict = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
a_ : Optional[int] = self.prepare_image_inputs()
a_ : List[str] = self.prepare_image_inputs()
a_ : Any = processor(images=A__ , visual_prompt=A__ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A__ ):
processor()
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
a_ : Optional[Any] = self.get_image_processor()
a_ : Optional[int] = self.get_tokenizer()
a_ : int = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
a_ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a_ : Union[str, Any] = processor.batch_decode(A__ )
a_ : int = tokenizer.batch_decode(A__ )
self.assertListEqual(A__ , A__ )
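    # End-to-end usage sketch for the processor under test ("CIDAS/clipseg-rd64-refined"
    # is an assumed public checkpoint; any CLIPSeg checkpoint would do):
    #   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    #   inputs = processor(text=["a cat"], images=[pil_image], return_tensors="pt")
    #   # -> input_ids, attention_mask and pixel_values, matching the assertions above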
| 700 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = IFInpaintingSuperResolutionPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
snake_case__ = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[Any]:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
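    # Generator-seeding sketch used in get_dummy_inputs above: device-bound generators
    # are unsupported on MPS, hence the CPU fallback, roughly:
    #   gen = torch.manual_seed(0) if device == "mps" else torch.Generator(device=device).manual_seed(0)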
| 666 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
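# Lazy-import sketch: at type-checking time the branch above resolves the real symbols,
# while at runtime the module is replaced by a _LazyModule, so e.g.
#   from transformers import TapasModel
# only triggers the modeling_tapas import on first attribute access, keeping
# `import transformers` cheap when the torch/tf extras are missing.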
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 | 0 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform ( number_of_qubits : int = 3 ):
    if isinstance(number_of_qubits , str ):
        raise TypeError('''number of qubits must be an integer.''' )
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''' )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError('''number of qubits must be an exact integer.''' )
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate (>10).''' )
    qr = QuantumRegister(number_of_qubits , '''qr''' )
    cr = ClassicalRegister(number_of_qubits , '''cr''' )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(counter ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''' )
    job = execute(quantum_circuit , backend , shots=1_00_00 )
    return job.result().get_counts(quantum_circuit )
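# Expected behaviour sketch: the QFT maps the all-zero input state to a uniform
# superposition, so with the default 3 qubits the 10_000 shots land roughly evenly on
# all 8 bitstrings, e.g. {'000': 1248, '001': 1265, ...} up to shot noise.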
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 702 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order ( df , partition_order ):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
    return expected_row_ids_and_row_dicts
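# Output-shape sketch: ids encode "<partition id>_<row index within partition>", so for
# partition_order=[1, 0] over a 4-row, 2-partition frame this might return
#   [("1_0", {"id": 2}), ("1_1", {"id": 3}), ("0_0", {"id": 0}), ("0_1", {"id": 1})]
# (the exact id-to-partition assignment depends on how Spark partitioned the frame).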
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : Union[str, Any] = spark.range(1_00 ).repartition(1 )
a_ : Any = Spark(__A )
    # The id ints will be converted to PyArrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : int = spark.range(10 ).repartition(2 )
a_ : Tuple = [1, 0]
a_ : List[str] = _generate_iterable_examples(__A , __A ) # Reverse the partitions.
a_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , __A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a_ , a_ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(10 ).repartition(1 )
a_ : Tuple = SparkExamplesIterable(__A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__A ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
a_ : Union[str, Any] = lambda __A : x.reverse()
a_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [2, 1, 0] )
a_ : str = SparkExamplesIterable(__A ).shuffle_data_sources(__A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[str] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a_ : Dict = SparkExamplesIterable(__A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [0, 2] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a_ : List[Any] = SparkExamplesIterable(__A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [1, 3] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[Any] = spark.range(1_00 ).repartition(1 )
a_ : Optional[Any] = Spark(__A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 666 | 0 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian ( x : float , mu : float = 0.0 , sigma : float = 1.0 ) -> float:
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
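# Worked check (doctest-style): the standard normal density at the mean is
# 1 / sqrt(2 * pi) ~= 0.3989:
#   >>> round(gaussian(0.0), 4)
#   0.3989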
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bloom"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=25_0880 , __SCREAMING_SNAKE_CASE : Dict=64 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : int=8 , __SCREAMING_SNAKE_CASE : Any=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : List[str]=False , **__SCREAMING_SNAKE_CASE : str , ) -> Any:
a_ : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
a_ : Any = kwargs.pop('''n_embed''' , __SCREAMING_SNAKE_CASE )
a_ : Optional[int] = hidden_size if n_embed is None else n_embed
a_ : int = n_layer
a_ : str = n_head
a_ : Optional[int] = layer_norm_epsilon
a_ : Dict = initializer_range
a_ : List[str] = use_cache
a_ : Dict = pretraining_tp
a_ : Optional[Any] = apply_residual_connection_post_layernorm
a_ : Optional[Any] = hidden_dropout
a_ : List[str] = attention_dropout
a_ : Dict = bos_token_id
a_ : Optional[int] = eos_token_id
a_ : Any = slow_but_exact
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = version.parse("1.12" )
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" , __SCREAMING_SNAKE_CASE : List[PatchingSpec] = None , __SCREAMING_SNAKE_CASE : bool = False , ) -> Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config , '''pad_token_id''' , __SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
a_ : Tuple = 0
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
a_ : Optional[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' , inverted_values_shape=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
a_ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self._config.n_head
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizer" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
a_ : Dict = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
a_ : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a_ , a_ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
a_ : str = seqlen + 2
a_ : Any = self._config.hidden_size // self.num_attention_heads
a_ : Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
a_ : Any = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
a_ : List[str] = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
a_ : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
a_ : Optional[int] = ordered_inputs['''attention_mask'''].dtype
a_ : List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 13
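    # Export sketch (illustrative; upstream these two classes appear to correspond to
    # BloomConfig and BloomOnnxConfig):
    #   config = BloomConfig(n_layer=2, n_head=8)
    #   onnx_config = BloomOnnxConfig(config, task="default", use_past=True)
    #   onnx_config.inputs  # input_ids, past_key_values.*.key/.value, attention_mask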
| 666 | 0 |
'''simple docstring'''
import os
def solution ( ):
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , '''triangle.txt''' )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(''' ''' ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number_a = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a , number_b )
    return max(a[-1] )
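# Worked sketch of the in-place DP above: for the triangle [[3], [7, 4], [2, 4, 6]] the
# rows become [3], [10, 7], [12, 14, 13], so max(a[-1]) == 14 (the path 3 -> 7 -> 4).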
if __name__ == "__main__":
print(solution())
| 704 |
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval ( s : str ) -> int:
    product = 1
    for digit in s:
        product *= int(digit )
    return product
def solution ( n : str = N ) -> int:
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product , str_eval(substr ) )
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
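# Window sketch: str_eval("123") == 6. The loop maintains a 13-digit window with a greedy
# heuristic: while the incoming digit is at least the digit about to drop out, the window
# just slides one position; otherwise the current window's product is recorded and the
# window jumps 13 digits ahead, skipping many full 13-digit products.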
if __name__ == "__main__":
print(F"""{solution() = }""")
| 666 | 0 |
'''simple docstring'''
import os
from pathlib import Path
def _UpperCAmelCase ( ):
from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
            os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
        ]
    ]
    load(
        '''MultiScaleDeformableAttention''' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
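# Usage sketch: calling this JIT-compiles the kernels on first use (a CUDA toolchain with
# nvcc must be available at runtime), then returns the compiled extension module:
#   MSDA = _UpperCAmelCase()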
| 705 |
'''simple docstring'''
from __future__ import annotations
def peak ( lst : list[int] ) -> int:
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m] ) == 2:
            m -= 1
        return peak(lst[m:] )
    # decreasing
    else:
        if len(lst[:m] ) == 2:
            m += 1
        return peak(lst[:m] )
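# Divide-and-conquer sketch: assuming the list rises then falls exactly once, inspecting
# the middle three elements picks the half to recurse on, O(log n) overall:
#   >>> peak([1, 3, 5, 9, 4, 2])
#   9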
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 | 0 |
'''simple docstring'''
def binary_multiply ( a : int , b : int ) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply ( a : int , b : int , modulus : int ) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
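# Worked sketch (Russian-peasant doubling): binary_multiply(3, 5) accumulates a over the
# set bits of b (5 = 0b101): res = 3, then 3 + 12 = 15. The modular variant keeps every
# partial sum reduced, e.g. binary_mod_multiply(5, 9, 7) == (5 * 9) % 7 == 3.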
| 706 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = LongformerTokenizer
snake_case__ = True
snake_case__ = LongformerTokenizerFast
snake_case__ = True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Any = {'''unk_token''': '''<unk>'''}
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Any , **__SCREAMING_SNAKE_CASE : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
a_ : Union[str, Any] = '''lower newer'''
a_ : List[Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a_ : List[str] = '''lower newer'''
a_ : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a_ : Optional[int] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokens + [tokenizer.unk_token]
a_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Dict = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
a_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
a_ : str = self.get_tokenizer()
a_ : int = '''Encode this sequence.'''
a_ : List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
a_ : Optional[Any] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = '''Encode <mask> sequence'''
a_ : List[str] = '''Encode <mask>sequence'''
a_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : str = '''A, <mask> AllenNLP sentence.'''
a_ : List[Any] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
a_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
a_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a_ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Dict = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
a_ : Union[str, Any] = f'{text_of_1_token} {text_of_1_token}'
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Union[str, Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a_ : str = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
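    # Offset-semantics sketch distilled from the assertions above, for text = "hello hello":
    #   add_prefix_space=False, trim_offsets=True  -> second token spans (6, 11)
    #   add_prefix_space=False, trim_offsets=False -> the leading space is kept: (5, 11)
    # i.e. trim_offsets only decides whether the "Ġ" space counts toward the offsets.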
| 666 | 0 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__lowerCAmelCase = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def _UpperCAmelCase ( __A : Optional[int] ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def _UpperCAmelCase ( __A : List[str] , __A : List[Any] ):
if args.student_type == "roberta":
a_ : int = False
elif args.student_type == "gpt2":
a_ : List[Any] = False
def _UpperCAmelCase ( __A : Union[str, Any] , __A : str ):
if args.student_type == "roberta":
a_ : Optional[Any] = False
def _UpperCAmelCase ( ):
a_ : Any = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=__A , required=__A , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
        '''--data_file''' , type=__A , required=__A , help='''The binarized file (tokenized + tokens_to_ids), grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=__A , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=__A , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=__A , required=__A , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=__A , type=__A , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=__A , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=__A , required=__A , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=__A , help='''Softmax temperature for the distillation loss.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=__A , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=__A , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=__A , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=__A , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=__A , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=__A , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=__A , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=__A , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=__A , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=__A , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=__A , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=__A , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=__A , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__A , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=__A , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=__A , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=__A , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=__A , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=__A , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=__A , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=__A , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=__A , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=__A , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=__A , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=__A , default=5_00 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=__A , default=40_00 , help='''Checkpoint interval.''' )
a_ : str = parser.parse_args()
sanity_checks(__A )
# ARGS #
init_gpu_params(__A )
set_seed(__A )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f'Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(f'Param: {args}' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(__A ) , __A , indent=4 )
git_log(args.dump_path )
a_ : Any = MODEL_CLASSES[args.student_type]
a_ : Optional[int] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
a_ : List[str] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
a_ : str = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
a_ : str = tokenizer.all_special_tokens.index(__A )
a_ : str = tokenizer.all_special_ids[idx]
logger.info(f'Special tokens {special_tok_ids}' )
a_ : Union[str, Any] = special_tok_ids
a_ : Optional[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'Loading data from {args.data_file}' )
with open(args.data_file , '''rb''' ) as fp:
a_ : Union[str, Any] = pickle.load(__A )
if args.mlm:
logger.info(f'Loading token counts from {args.token_counts} (already pre-computed)' )
with open(args.token_counts , '''rb''' ) as fp:
a_ : Optional[int] = pickle.load(__A )
a_ : List[Any] = np.maximum(__A , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
a_ : Dict = 0.0 # do not predict special tokens
a_ : List[str] = torch.from_numpy(__A )
else:
a_ : int = None
a_ : Optional[int] = LmSeqsDataset(params=__A , data=__A )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f'Loading student config from {args.student_config}' )
a_ : str = student_config_class.from_pretrained(args.student_config )
a_ : Optional[int] = True
if args.student_pretrained_weights is not None:
logger.info(f'Loading pretrained weights from {args.student_pretrained_weights}' )
a_ : Any = student_model_class.from_pretrained(args.student_pretrained_weights , config=__A )
else:
a_ : List[Any] = student_model_class(__A )
if args.n_gpu > 0:
student.to(f'cuda:{args.local_rank}' )
logger.info('''Student loaded.''' )
# TEACHER #
a_ : Tuple = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__A )
if args.n_gpu > 0:
teacher.to(f'cuda:{args.local_rank}' )
logger.info(f'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__A , __A )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__A , __A )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
a_ : Optional[Any] = Distiller(
params=__A , dataset=__A , token_probs=__A , student=__A , teacher=__A )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
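# Typical invocation sketch (every path and checkpoint name below is a placeholder):
#   python train.py --force --dump_path ./serialization_dir \
#       --data_file data/binarized.pickle --token_counts data/token_counts.pickle --mlm \
#       --student_type distilbert --student_config training_configs/distilbert.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0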
| 707 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__lowerCAmelCase = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
__lowerCAmelCase = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
__lowerCAmelCase = '▁'
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict="<s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="<s>" , __SCREAMING_SNAKE_CASE : Dict="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Tuple="<mask>" , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
a_ : Tuple = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
a_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
a_ : Tuple = vocab_file
a_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
a_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
a_ : Any = len(self.sp_model ) - 1
a_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ : List[str] = [self.cls_token_id]
a_ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
a_ : List[str] = [self.sep_token_id]
a_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : int = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a_ : Optional[int] = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
a_ : Dict = []
a_ : List[Any] = ''''''
a_ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
a_ : Dict = True
a_ : Optional[Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
a_ : Tuple = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def __getstate__( self : Dict ) -> int:
a_ : Dict = self.__dict__.copy()
a_ : List[str] = None
return state
def __setstate__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
a_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a_ : Union[str, Any] = {}
a_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
a_ : Union[str, Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
a_ : Any = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
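    # Special-token layout implied by the builders above (RoBERTa-style):
    #   single sequence: <s> A </s>
    #   sequence pair:   <s> A </s></s> B </s>
    # with token_type_ids all zeros in both cases.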
| 666 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'vocab.json'}
__lowerCAmelCase = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
__lowerCAmelCase = {'mgp-str': 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string into a list of single characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
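# Illustrative usage (vocab path is a placeholder): the tokenizer above is purely
# character-level, so _tokenize("abc") yields ["a", "b", "c"] and ids come straight
# from the JSON vocab.
#
#   tokenizer = MgpstrTokenizer("vocab.json")
#   ids = [tokenizer._convert_token_to_id(c) for c in tokenizer._tokenize("abc")]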
| 708 |
'''simple docstring'''
def jaro_winkler(str_a: str, str_b: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (between 0.0 and 1.0)."""

    def get_matched_characters(_str_a: str, _str_b: str) -> str:
        matched = []
        limit = min(len(_str_a), len(_str_b)) // 2
        for i, l in enumerate(_str_a):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str_b)))
            if l in _str_b[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched twice
                _str_b = f"{_str_b[0:_str_b.index(l)]} {_str_b[_str_b.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(str_a, str_b)
    matching_b = get_matched_characters(str_b, str_a)
    match_count = len(matching_a)

    # transpositions
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_a)
                + match_count / len(str_b)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(str_a[:4], str_b[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
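# Worked example: for "hello" vs "world" only "l" is matched, so
# jaro = (1/5 + 1/5 + 1/1) / 3 ~= 0.4667; there is no common prefix, so the Winkler
# bonus is zero and the call below prints ~0.4667.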
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 666 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
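# Worked example (from the problem statement): 49/98 is "digit cancelling" because
# 49 % 10 == 98 // 10 == 9 and (49 // 10) / (98 % 10) == 4 / 8 == 49 / 98, so
# is_digit_cancelling(49, 98) is True and "49/98" appears in fraction_list(2).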
if __name__ == "__main__":
print(solution())
| 709 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
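# Hedged usage sketch (not part of the original file): W_query and W_supports are
# tokenizer outputs, with W_supports additionally carrying "sizes" and the special
# start/end token ids that mark candidate entity spans.
#
#   model = FSNERModel()
#   p_starts, p_ends = model(W_query, W_supports)  # span-boundary scores per query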
| 666 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}
CONTROL_CODES = {
"Pregnancy": 168_629,
"Christianity": 7_675,
"Explain": 106_423,
"Fitness": 63_440,
"Saving": 63_163,
"Ask": 27_171,
"Ass": 95_985,
"Joke": 163_509,
"Questions": 45_622,
"Thoughts": 49_605,
"Retail": 52_342,
"Feminism": 164_338,
"Writing": 11_992,
"Atheism": 192_263,
"Netflix": 48_616,
"Computing": 39_639,
"Opinion": 43_213,
"Alone": 44_967,
"Funny": 58_917,
"Gaming": 40_358,
"Human": 4_088,
"India": 1_331,
"Joker": 77_138,
"Diet": 36_206,
"Legal": 11_859,
"Norman": 4_939,
"Tip": 72_689,
"Weight": 52_343,
"Movies": 46_273,
"Running": 23_425,
"Science": 2_090,
"Horror": 37_793,
"Confession": 60_572,
"Finance": 12_250,
"Politics": 16_360,
"Scary": 191_985,
"Support": 12_654,
"Technologies": 32_516,
"Teenage": 66_160,
"Event": 32_769,
"Learned": 67_460,
"Notion": 182_770,
"Wikipedia": 37_583,
"Books": 6_665,
"Extract": 76_050,
"Confessions": 102_701,
"Conspiracy": 75_932,
"Links": 63_674,
"Narcissus": 150_425,
"Relationship": 54_766,
"Relationships": 134_796,
"Reviews": 41_671,
"News": 4_256,
"Translation": 26_820,
"multilingual": 128_406,
}
def get_pairs(word):
    """Return the set of symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
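# Illustrative round trip (checkpoint name comes from this file's own
# PRETRAINED_VOCAB_FILES_MAP; network access is assumed):
#
#   tokenizer = CTRLTokenizer.from_pretrained("ctrl")
#   tokens = tokenizer._tokenize("Links Hello world")   # BPE pieces end in "@@"
#   text = tokenizer.convert_tokens_to_string(tokens)   # "@@ " joints are removed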
| 710 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
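# Usage sketch (illustrative): aligning the template with a dataset's features swaps
# the bare ClassLabel placeholder in label_schema for the dataset's concrete feature.
#
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = ImageClassification(image_column="image", label_column="labels")
#   task = task.align_with_features(features)  # label_schema now carries the names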
| 666 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
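# To run just this file (illustrative command; the path assumes the usual
# transformers test layout):
#
#   python -m pytest tests/models/dpr/test_modeling_tf_dpr.py -x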
| 711 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
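# Worked note: open tours exist on the trivial 1x1 board and on all n >= 5 square
# boards, but not on 2x2, 3x3 or 4x4; open_knight_tour(5) therefore returns a board
# whose entries 1..25 trace the knight's path, while open_knight_tour(4) raises.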
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # We use the input text to generate the segmentation mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 712 |
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
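# The forward-compatible import the warning above recommends:
#
#   from accelerate import find_executable_batch_size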
| 666 | 0 |
'''simple docstring'''
def rank_of_matrix(matrix: list[list[float]]) -> int:
    """Find the rank of a matrix using in-place Gaussian elimination."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]

            # Reduce the row pointer by one to stay on the same row
            row -= 1

    return rank
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
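# Note (illustrative): the call above performs a live HTTP request against Google
# Scholar, so it needs network access and relies on the page keeping the
# "gs_ri"/"gs_fl" class names; for an offline check, feed BeautifulSoup saved HTML.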
| 666 | 0 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__lowerCAmelCase = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n'
__lowerCAmelCase = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
__lowerCAmelCase = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 714 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
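# Example invocation (script name and checkpoint paths are placeholders):
#
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path ./prophetnet_old \
#       --pytorch_dump_folder_path ./prophetnet_converted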
| 666 | 0 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
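# Example invocation (script name and paths are placeholders):
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path lora_weights.safetensors --dump_path ./merged --alpha 0.75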
| 715 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 666 | 0 |
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
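    # A couple of extra illustrative checks (expected values are our assumptions):
    print(indian_phone_validator('9876543210'))  # True: bare 10-digit mobile number
    print(indian_phone_validator('+91 2345678901'))  # False: mobiles start with 7, 8 or 9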
| 716 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
torch.manual_seed(0 )
a_ : Tuple = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
torch.manual_seed(0 )
a_ : Any = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
a_ : List[Any] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
a_ : List[Any] = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a_ : Union[str, Any] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
a_ : Any = DDPMScheduler()
a_ : str = AudioDiffusionPipeline(vqvae=__SCREAMING_SNAKE_CASE , unet=self.dummy_unet , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 )
a_ : List[Any] = output.audios[0]
a_ : Dict = output.images[0]
a_ : Dict = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : Optional[Any] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : str = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
a_ : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
a_ : int = DDIMScheduler()
a_ : Dict = self.dummy_vqvae_and_unet
a_ : List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : Any = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : List[str] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
a_ : int = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : int = pipe(raw_audio=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , start_step=5 , steps=10 )
a_ : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
a_ : Optional[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
a_ : List[str] = self.dummy_unet_condition
a_ : Dict = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__SCREAMING_SNAKE_CASE , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : int = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : Any = torch.rand((1, 1, 10) )
a_ : Tuple = pipe(generator=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.images[0]
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
a_ : Any = torch_device
a_ : Optional[int] = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
a_ : Dict = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.audios[0]
a_ : Tuple = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
a_ : str = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : Tuple = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
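# Illustrative helper (ours, not part of the tests): the audio-length arithmetic
# behind the `audio.shape` assertions above. A waveform reconstructed from a
# spectrogram of W frames with hop length H has (W - 1) * H samples,
# e.g. W = 64 frames and H = 512 gives (64 - 1) * 512 = 32256 samples.
def _expected_audio_length(spectrogram_width: int, hop_length: int) -> int:
    return (spectrogram_width - 1) * hop_length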
| 666 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
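# Illustrative usage sketch (the checkpoint id and image path are placeholders):
#   from PIL import Image
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")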
| 717 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"],
    )
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
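# Minimal sketch (illustrative only, the names are ours): the slicing pattern used by
# both read_in_*_q_k_v helpers above, on a tiny fused q/k/v projection of hidden size d.
def _demo_split_fused_qkv(d: int = 4):
    fused_weight = torch.arange(3 * d * d, dtype=torch.float32).reshape(3 * d, d)
    fused_bias = torch.arange(3 * d, dtype=torch.float32)
    q = (fused_weight[:d, :], fused_bias[:d])            # first d rows -> query
    k = (fused_weight[d : d * 2, :], fused_bias[d : d * 2])  # middle d rows -> key
    v = (fused_weight[-d:, :], fused_bias[-d:])          # last d rows -> value
    return q, k, v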
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f'Model name {model_name} not supported')

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f'jozhang97/{model_name}')
        processor.push_to_hub(f'jozhang97/{model_name}')
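# Example invocation (the script filename is an assumption; the flags match the
# arguments defined below):
#   python convert_deta_swin_to_hf.py --model_name deta-swin-large --pytorch_dump_folder_path ./deta-swin-large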
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 666 | 0 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int=13 , __SCREAMING_SNAKE_CASE : Optional[int]=7 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : Any=99 , __SCREAMING_SNAKE_CASE : Tuple=0 , __SCREAMING_SNAKE_CASE : List[Any]=32 , __SCREAMING_SNAKE_CASE : Tuple=5 , __SCREAMING_SNAKE_CASE : Union[str, Any]=4 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Any=512 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : str=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : List[str]=4 , __SCREAMING_SNAKE_CASE : Tuple="last" , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Any=0 , ) -> List[str]:
a_ : int = parent
a_ : Dict = batch_size
a_ : Dict = seq_length
a_ : Dict = is_training
a_ : Tuple = use_input_lengths
a_ : str = use_token_type_ids
a_ : List[str] = use_labels
a_ : Any = gelu_activation
a_ : int = sinusoidal_embeddings
a_ : Union[str, Any] = causal
a_ : str = asm
a_ : Dict = n_langs
a_ : Union[str, Any] = vocab_size
a_ : Dict = n_special
a_ : List[str] = hidden_size
a_ : Union[str, Any] = num_hidden_layers
a_ : Dict = num_attention_heads
a_ : List[str] = hidden_dropout_prob
a_ : int = attention_probs_dropout_prob
a_ : int = max_position_embeddings
a_ : Dict = type_sequence_label_size
a_ : Any = initializer_range
a_ : List[str] = num_labels
a_ : Union[str, Any] = num_choices
a_ : Tuple = summary_type
a_ : List[Any] = use_proj
a_ : Dict = scope
a_ : Union[str, Any] = bos_token_id
def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
a_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
a_ : Any = None
if self.use_input_lengths:
a_ : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
a_ : Optional[Any] = None
if self.use_token_type_ids:
a_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
a_ : List[Any] = None
a_ : List[str] = None
a_ : Optional[int] = None
if self.use_labels:
a_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ : str = ids_tensor([self.batch_size] , 2 ).float()
a_ : int = ids_tensor([self.batch_size] , self.num_choices )
a_ : int = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , ) -> str:
a_ : Tuple = XLMModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a_ : Any = model(lowerCamelCase_ , lengths=lowerCamelCase_ , langs=lowerCamelCase_ )
a_ : Optional[int] = model(lowerCamelCase_ , langs=lowerCamelCase_ )
a_ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , ) -> Tuple:
a_ : Any = XLMWithLMHeadModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a_ : Union[str, Any] = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , ) -> Optional[Any]:
a_ : List[str] = XLMForQuestionAnsweringSimple(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a_ : int = model(lowerCamelCase_ )
a_ : Optional[int] = model(lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ )
a_ : Union[str, Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , ) -> int:
a_ : Dict = XLMForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a_ : Union[str, Any] = model(lowerCamelCase_ )
a_ : str = model(
lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , cls_index=lowerCamelCase_ , is_impossible=lowerCamelCase_ , p_mask=lowerCamelCase_ , )
a_ : Tuple = model(
lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , cls_index=lowerCamelCase_ , is_impossible=lowerCamelCase_ , )
(a_ ) : List[Any] = result_with_labels.to_tuple()
a_ : Dict = model(lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ )
(a_ ) : Optional[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> List[str]:
a_ : Optional[Any] = XLMForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a_ : Tuple = model(lowerCamelCase_ )
a_ : Any = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , ) -> Any:
a_ : Any = self.num_labels
a_ : Optional[int] = XLMForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a_ : List[str] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , ) -> Optional[Any]:
a_ : Dict = self.num_choices
a_ : List[Any] = XLMForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a_ : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ : int = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
a_ : int = self.prepare_config_and_inputs()
(
a_
) : List[str] = config_and_inputs
a_ : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
snake_case__ = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
snake_case__ = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
snake_case__ = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any=False ) -> Any:
a_ : Optional[Any] = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a_ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
a_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : List[str] = XLMModelTester(self )
a_ : Dict = ConfigTester(self , config_class=lowerCamelCase_ , emb_dim=37 )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
a_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
a_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
a_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : str=1 ) -> Union[str, Any]:
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(
[isinstance(lowerCamelCase_ , lowerCamelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCamelCase_ ) )
self.assertEqual(len(lowerCamelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCamelCase_ ):
# adds PAD dummy token
a_ : Any = min_length + idx + 1
a_ : Any = min_length + idx + 1
a_ : int = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCamelCase_ ) )
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Dict=1 ) -> Tuple:
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(
[isinstance(lowerCamelCase_ , lowerCamelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCamelCase_ ) , )
self.assertEqual(len(lowerCamelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCamelCase_ ):
# adds PAD dummy token
a_ : List[str] = min_length + idx + 1
a_ : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCamelCase_ ) , )
pass
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : Union[str, Any] = XLMModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
a_ : Tuple = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(lowerCamelCase_ )
a_ : int = torch.tensor([[14, 447]] , dtype=torch.long , device=lowerCamelCase_ ) # the president
a_ : int = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
a_ : Optional[int] = model.generate(lowerCamelCase_ , do_sample=lowerCamelCase_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCamelCase_ )
| 718 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        scheduler = DDIMScheduler()
        components = {'''unet''': unet, '''scheduler''': scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu'''
a_ : List[Any] = self.get_dummy_components()
a_ : List[str] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = pipe(**__SCREAMING_SNAKE_CASE ).images
a_ : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
a_ : int = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
a_ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
a_ : Optional[Any] = '''google/ddpm-cifar10-32'''
a_ : Optional[Any] = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Dict = DDIMScheduler()
a_ : List[str] = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddim.to(__SCREAMING_SNAKE_CASE )
ddim.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : Tuple = ddim(generator=__SCREAMING_SNAKE_CASE , eta=0.0 , output_type='''numpy''' ).images
a_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ : List[str] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : int = '''google/ddpm-ema-bedroom-256'''
a_ : str = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Tuple = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddpm.to(__SCREAMING_SNAKE_CASE )
ddpm.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : List[Any] = ddpm(generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
a_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
a_ : Optional[Any] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
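# Note (illustrative): eta=0.0 in the DDIM call above makes sampling fully
# deterministic; eta=1.0 would recover DDPM-like stochastic sampling.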
| 666 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput(BaseOutput):
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNetaDConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self) -> None:
a_ : Any = self.block_out_channels
a_ : Optional[Any] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
a_ : str = self.num_attention_heads or self.attention_head_dim
# input
a_ : str = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
a_ : List[str] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
a_ : int = FlaxTimestepEmbedding(__UpperCamelCase , dtype=self.dtype )
a_ : Tuple = self.only_cross_attention
if isinstance(__UpperCamelCase , __UpperCamelCase ):
a_ : Any = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
a_ : str = (num_attention_heads,) * len(self.down_block_types )
# down
a_ : Any = []
a_ : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
a_ : List[Any] = output_channel
a_ : int = block_out_channels[i]
a_ : Optional[int] = i == len(__UpperCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
a_ : str = FlaxCrossAttnDownBlockaD(
in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
a_ : Optional[Any] = FlaxDownBlockaD(
in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__UpperCamelCase )
a_ : Optional[int] = down_blocks
# mid
a_ : Dict = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
a_ : Any = []
a_ : List[str] = list(reversed(__UpperCamelCase ) )
a_ : int = list(reversed(__UpperCamelCase ) )
a_ : int = list(reversed(__UpperCamelCase ) )
a_ : str = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
a_ : Optional[Any] = output_channel
a_ : Dict = reversed_block_out_channels[i]
a_ : Union[str, Any] = reversed_block_out_channels[min(i + 1 , len(__UpperCamelCase ) - 1 )]
a_ : str = i == len(__UpperCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
a_ : str = FlaxCrossAttnUpBlockaD(
in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , prev_output_channel=__UpperCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
a_ : str = FlaxUpBlockaD(
in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , prev_output_channel=__UpperCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(__UpperCamelCase )
a_ : Any = output_channel
a_ : Optional[Any] = up_blocks
# out
a_ : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
a_ : List[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
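        # 1. time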
if not isinstance(__UpperCamelCase , jnp.ndarray ):
a_ : List[Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__UpperCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
a_ : List[Any] = timesteps.astype(dtype=jnp.floataa )
a_ : Union[str, Any] = jnp.expand_dims(__UpperCamelCase , 0 )
a_ : List[str] = self.time_proj(__UpperCamelCase )
a_ : List[Any] = self.time_embedding(__UpperCamelCase )
# 2. pre-process
a_ : Dict = jnp.transpose(__UpperCamelCase , (0, 2, 3, 1) )
a_ : List[Any] = self.conv_in(__UpperCamelCase )
# 3. down
a_ : Optional[Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
a_ , a_ : Optional[Any] = down_block(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , deterministic=not train )
else:
a_ , a_ : int = down_block(__UpperCamelCase , __UpperCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
a_ : str = ()
for down_block_res_sample, down_block_additional_residual in zip(
__UpperCamelCase , __UpperCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
a_ : Optional[Any] = new_down_block_res_samples
# 4. mid
a_ : Dict = self.mid_block(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
a_ : List[str] = down_block_res_samples[-(self.layers_per_block + 1) :]
a_ : List[Any] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
a_ : Any = up_block(
__UpperCamelCase , temb=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , res_hidden_states_tuple=__UpperCamelCase , deterministic=not train , )
else:
a_ : List[str] = up_block(__UpperCamelCase , temb=__UpperCamelCase , res_hidden_states_tuple=__UpperCamelCase , deterministic=not train )
# 6. post-process
a_ : Tuple = self.conv_norm_out(__UpperCamelCase )
a_ : Union[str, Any] = nn.silu(__UpperCamelCase )
a_ : Dict = self.conv_out(__UpperCamelCase )
a_ : Optional[int] = jnp.transpose(__UpperCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=__UpperCamelCase )
| 719 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = 42
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="Translation" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def __call__( self : Dict ) -> Tuple:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = None
snake_case__ = None
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="TranslationVariableLanguages" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : List[str] = sorted(set(self.languages ) ) if self.languages else None
a_ : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self : Any ) -> Optional[Any]:
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]:
a_ : str = set(self.languages )
if self.languages and set(__SCREAMING_SNAKE_CASE ) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(__SCREAMING_SNAKE_CASE ) - lang_set ) )}) are not in valid set ({", ".join(__SCREAMING_SNAKE_CASE )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
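        # For example (hypothetical input): {"en": "the cat", "fr": ["le chat", "la chatte"]}
        # becomes [("en", "the cat"), ("fr", "le chat"), ("fr", "la chatte")].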
a_ : int = []
for lang, text in translation_dict.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
a_ , a_ : List[Any] = zip(*sorted(__SCREAMING_SNAKE_CASE ) )
return {"language": languages, "translation": translations}
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 666 | 0 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
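# Counts how many tile totals t = outer_width**2 - hole_width**2 occur between 1 and
# n_limit times; for example, a 3x3 outer square with a 1x1 hole uses 3*3 - 1*1 = 8 tiles.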
def solution( t_limit : int = 1_00_00_00 , n_limit : int = 10 ):
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        # the hole width must share the parity of the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 720 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
a_ : Union[str, Any] = tempfile.mkdtemp()
a_ : Union[str, Any] = 8
# DPR tok
a_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a_ : str = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
a_ : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : int = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Optional[int] = {'''unk_token''': '''<unk>'''}
a_ : List[str] = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : int = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : str ) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
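        # With an 8-dim all-ones query, the inner products are 8 for doc "0" and 16 for
        # doc "1", which is why the tests below expect doc "1" to be retrieved first.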
return dataset
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ : List[str] = self.get_dummy_dataset()
a_ : Tuple = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : Tuple = dataset
a_ : Any = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : bool ) -> Dict:
a_ : Dict = self.get_dummy_dataset()
a_ : Dict = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
a_ : Optional[int] = os.path.join(self.tmpdirname , '''dataset''' )
a_ : str = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
a_ : Optional[Any] = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE ) , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
a_ : Optional[int] = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
a_ : Union[str, Any] = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
a_ : Dict = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__SCREAMING_SNAKE_CASE , open(__SCREAMING_SNAKE_CASE , '''wb''' ) )
a_ : Optional[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : Optional[Any] = 1
a_ : Dict = self.get_dummy_canonical_hf_index_retriever()
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : str = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : str = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : List[str] = self.get_dummy_dataset()
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : List[str] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Union[str, Any] = 1
a_ : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
a_ : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : List[str] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
a_ : Union[str, Any] = 1
a_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Tuple = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
a_ : str = 1
a_ : Tuple = self.get_dummy_legacy_index_retriever()
a_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
a_ : List[str] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : Optional[Any] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
import torch
a_ : Any = 1
a_ : List[Any] = self.get_dummy_canonical_hf_index_retriever()
a_ : Union[str, Any] = [[5, 7], [10, 11]]
a_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : str = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
a_ , a_ , a_ : List[str] = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
a_ : Any = retriever(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
a_ , a_ , a_ , a_ : str = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : str = self.get_dpr_ctx_encoder_tokenizer()
a_ : Tuple = 1
a_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
retriever.set_ctx_encoder_tokenizer(__SCREAMING_SNAKE_CASE )
a_ : Dict = [[5, 7], [10, 11]]
a_ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ : List[Any] = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(
            len(__SCREAMING_SNAKE_CASE ) , 6 )  # check that the retriever output consists of 6 keys, including the tokenized docs
self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __SCREAMING_SNAKE_CASE )  # check for the doc-token-related keys in the dictionary
| 666 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def _UpperCAmelCase ( __A : List[Any] ):
a_ : List[str] = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def _UpperCAmelCase ( __A : str , __A : int ):
a_ : str = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def _UpperCAmelCase ( __A : Tuple ):
a_ : int = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') )
return token
def _UpperCAmelCase ( ):
a_ : Union[str, Any] = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def _UpperCAmelCase ( __A : Union[str, Any] , __A : Union[str, Any] , __A : Optional[Any] , __A : List[Any] ):
a_ : Union[str, Any] = """imagenet-1k-id2label.json"""
a_ : Optional[Any] = 10_00
a_ : Any = """huggingface/label-files"""
a_ : Tuple = num_labels
a_ : Dict = json.load(open(cached_download(hf_hub_url(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) ) , '''r''' ) )
a_ : str = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
a_ : List[str] = idalabel
a_ : Any = {v: k for k, v in idalabel.items()}
a_ : Dict = CvtConfig(num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
a_ : Any = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
a_ : Any = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
a_ : Optional[int] = [2, 2, 20]
a_ : str = [3, 12, 16]
a_ : Any = [1_92, 7_68, 10_24]
a_ : Union[str, Any] = CvtForImageClassification(__UpperCamelCase )
a_ : str = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
a_ : List[Any] = image_size
a_ : str = torch.load(__UpperCamelCase , map_location=torch.device('''cpu''' ) )
a_ : Any = OrderedDict()
a_ : Tuple = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
a_ : Optional[Any] = list_of_state_dict + cls_token(__UpperCamelCase )
a_ : str = list_of_state_dict + embeddings(__UpperCamelCase )
for cnt in range(config.depth[idx] ):
a_ : List[str] = list_of_state_dict + attention(__UpperCamelCase , __UpperCamelCase )
a_ : str = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
a_ : Union[str, Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__lowerCAmelCase = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
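# Example invocation (hypothetical script and file names; the flags are defined above):
#   python convert_cvt.py --cvt_model cvt-13 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-13-384x384-IN-1k.pth --pytorch_dump_folder_path ./cvt-13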
| 721 |
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube( side_length : float ):
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def surface_area_cuboid( length : float , breadth : float , height : float ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere( radius : float ):
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def surface_area_hemisphere( radius : float ):
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def surface_area_cone( radius : float , height : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum( radius_1 : float , radius_2 : float , height : float ):
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            '''surface_area_conical_frustum() only accepts non-negative values''' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder( radius : float , height : float ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def surface_area_torus( torus_radius : float , tube_radius : float ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def area_rectangle( length : float , width : float ):
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def area_square( side_length : float ):
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def area_triangle( base : float , height : float ):
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def area_triangle_three_sides( side_1 : float , side_2 : float , side_3 : float ):
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('''Given three sides do not form a triangle''' )
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
return area
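# For example, sides 5, 12, 13 give semi_perimeter = 15 and
# area = sqrt(15 * 10 * 3 * 2) = 30, the 5-12-13 right triangle exercised in the demo below.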
def area_parallelogram( base : float , height : float ):
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def area_trapezium( base_1 : float , base_2 : float , height : float ):
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('''area_trapezium() only accepts non-negative values''' )
    return 1 / 2 * (base_1 + base_2) * height
def area_circle( radius : float ):
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def area_ellipse( radius_x : float , radius_y : float ):
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def area_rhombus( diagonal_1 : float , diagonal_2 : float ):
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('''area_rhombus() only accepts non-negative values''' )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon( sides : int , length : float ):
    if not isinstance(sides , int ) or sides < 3:
        raise ValueError(
            '''area_reg_polygon() only accepts integers greater than or '''
            '''equal to three as number of sides''' )
    elif length < 0:
        raise ValueError(
            '''area_reg_polygon() only accepts non-negative values as '''
            '''length of a side''' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
    print(F"""Regular Pentagon: {area_reg_polygon(5, 10) = }""")
| 666 | 0 |
'''simple docstring'''
def method_a( boundary : list , steps : float ):
    '''Composite trapezoidal rule over [boundary[0], boundary[1]] with `steps` subintervals.'''
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y
def make_points( a : float , b : float , h : float ):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f( x : float ):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary , steps )
    print(f'y = {y}' )
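# Sanity check: the exact integral of x**2 on [0, 1] is 1/3 ~= 0.3333; with the 10 steps
# used in main() the composite trapezoidal estimate comes out to ~= 0.335.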
if __name__ == "__main__":
main()
| 700 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = IFInpaintingSuperResolutionPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
snake_case__ = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[Any]:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 666 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
# TODO: upload to AWS
__lowerCAmelCase = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE ( lowercase__ ):
snake_case__ = 'retribert'
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str]=3_0522 , __SCREAMING_SNAKE_CASE : str=768 , __SCREAMING_SNAKE_CASE : Tuple=8 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Optional[int]=3072 , __SCREAMING_SNAKE_CASE : List[str]="gelu" , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Dict=512 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : Dict=1e-12 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Dict=128 , __SCREAMING_SNAKE_CASE : List[str]=0 , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> int:
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = vocab_size
a_ : List[str] = hidden_size
a_ : List[Any] = num_hidden_layers
a_ : int = num_attention_heads
a_ : Optional[int] = hidden_act
a_ : List[str] = intermediate_size
a_ : str = hidden_dropout_prob
a_ : Optional[int] = attention_probs_dropout_prob
a_ : Union[str, Any] = max_position_embeddings
a_ : List[str] = type_vocab_size
a_ : Optional[int] = initializer_range
a_ : str = layer_norm_eps
a_ : Tuple = share_encoders
a_ : str = projection_dim
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
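# `_LazyModule` (constructed at the bottom of this file) defers these imports until a
# name is first accessed, which keeps importing the package cheap.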
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666 | 0 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified( ode_func : Callable , y0 : float , x0 : float , step_size : float , x_end : float ):
    '''Heun's (modified Euler) predictor-corrector method.'''
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        # predictor: forward-Euler estimate of y[k + 1]
        y_predict = y[k] + step_size * ode_func(x , y[k] )
        # corrector: average the slopes at both ends of the step
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_predict ))
        )
        x += step_size
    return y
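# Usage sketch (hypothetical ODE): for y' = y with y(0) = 1,
#   euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1] approximates e ~= 2.718.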
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCAmelCase ( __A : List[str] , __A : List[Any] ):
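    # Row ids take the form f"{partition}_{row}"; e.g. "1_0" is the first row of partition 1.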
a_ : Any = []
for part_id in partition_order:
a_ : str = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(__A ):
expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : Union[str, Any] = spark.range(1_00 ).repartition(1 )
a_ : Any = Spark(__A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : int = spark.range(10 ).repartition(2 )
a_ : Tuple = [1, 0]
a_ : List[str] = _generate_iterable_examples(__A , __A ) # Reverse the partitions.
a_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , __A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a_ , a_ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(10 ).repartition(1 )
a_ : Tuple = SparkExamplesIterable(__A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__A ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
a_ : Union[str, Any] = lambda __A : x.reverse()
a_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [2, 1, 0] )
a_ : str = SparkExamplesIterable(__A ).shuffle_data_sources(__A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[str] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a_ : Dict = SparkExamplesIterable(__A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [0, 2] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a_ : List[Any] = SparkExamplesIterable(__A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [1, 3] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[Any] = spark.range(1_00 ).repartition(1 )
a_ : Optional[Any] = Spark(__A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 666 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 703 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bloom"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=25_0880 , __SCREAMING_SNAKE_CASE : Dict=64 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : int=8 , __SCREAMING_SNAKE_CASE : Any=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : List[str]=False , **__SCREAMING_SNAKE_CASE : str , ) -> Any:
a_ : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
a_ : Any = kwargs.pop('''n_embed''' , __SCREAMING_SNAKE_CASE )
a_ : Optional[int] = hidden_size if n_embed is None else n_embed
a_ : int = n_layer
a_ : str = n_head
a_ : Optional[int] = layer_norm_epsilon
a_ : Dict = initializer_range
a_ : List[str] = use_cache
a_ : Dict = pretraining_tp
a_ : Optional[Any] = apply_residual_connection_post_layernorm
a_ : Optional[Any] = hidden_dropout
a_ : List[str] = attention_dropout
a_ : Dict = bos_token_id
a_ : Optional[int] = eos_token_id
a_ : Any = slow_but_exact
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = version.parse("1.12" )
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" , __SCREAMING_SNAKE_CASE : List[PatchingSpec] = None , __SCREAMING_SNAKE_CASE : bool = False , ) -> Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config , '''pad_token_id''' , __SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
a_ : Tuple = 0
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
a_ : Optional[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' , inverted_values_shape=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
a_ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self._config.n_head
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizer" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
a_ : Dict = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
a_ : Union[str, Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a_ , a_ : Any = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
a_ : str = seqlen + 2
a_ : Any = self._config.hidden_size // self.num_attention_heads
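            # BLOOM's cache fuses batch and head dims: keys are shaped
            # (batch * n_head, head_dim, past_len) and values (batch * n_head, past_len, head_dim).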
a_ : Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
a_ : Any = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
a_ : List[str] = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
a_ : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
a_ : Optional[int] = ordered_inputs['''attention_mask'''].dtype
a_ : List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 13
| 666 | 0 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__lowerCAmelCase = logging.get_logger(__name__)
def _UpperCAmelCase ( __A : str , __A : Dict , __A : str , __A : Optional[int]=None , __A : Optional[int]=None ):
if "." in tensor_name:
a_ : Optional[int] = tensor_name.split('''.''' )
for split in splits[:-1]:
a_ : List[Any] = getattr(_lowerCamelCase , _lowerCamelCase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
a_ : Any = new_module
a_ : int = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
a_ : int = tensor_name in module._buffers
a_ : Tuple = getattr(_lowerCamelCase , _lowerCamelCase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
a_ : str = False
a_ : Optional[Any] = False
if is_buffer or not is_bitsandbytes_available():
a_ : List[Any] = False
a_ : int = False
else:
a_ : Optional[Any] = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
a_ : Union[str, Any] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
    # `is_4bit`/`is_8bit` correspond to the two checks just above (Params4bit vs. Int8Params)
    if is_4bit or is_8bit:
a_ : Union[str, Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
a_ : Dict = old_value.to(_lowerCamelCase )
elif isinstance(_lowerCamelCase , torch.Tensor ):
a_ : int = value.to('''cpu''' )
if value.dtype == torch.inta:
a_ : List[Any] = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
a_ : Any = torch.tensor(_lowerCamelCase , device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , _lowerCamelCase ) and fpaa_statistics is None:
a_ : Dict = new_value.T
a_ : int = old_value.__dict__
            if is_8bit:
                a_ : Optional[int] = bnb.nn.IntaParams(_lowerCamelCase , requires_grad=_lowerCamelCase , **_lowerCamelCase ).to(_lowerCamelCase )
            elif is_4bit:
                a_ : List[str] = bnb.nn.Paramsabit(_lowerCamelCase , requires_grad=_lowerCamelCase , **_lowerCamelCase ).to(_lowerCamelCase )
a_ : List[str] = new_value
if fpaa_statistics is not None:
setattr(module.weight , '''SCB''' , fpaa_statistics.to(_lowerCamelCase ) )
else:
if value is None:
a_ : List[str] = old_value.to(_lowerCamelCase )
elif isinstance(_lowerCamelCase , torch.Tensor ):
a_ : Optional[Any] = value.to(_lowerCamelCase )
else:
a_ : Any = torch.tensor(_lowerCamelCase , device=_lowerCamelCase )
if is_buffer:
a_ : Union[str, Any] = new_value
else:
a_ : Union[str, Any] = nn.Parameter(_lowerCamelCase , requires_grad=old_value.requires_grad )
a_ : Dict = new_value
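# --- Illustrative sketch (assumption, not in the original file): the dotted-name walk
# at the top of the function above, reproduced on a plain attribute tree. It resolves
# e.g. "encoder.layer.weight" to the object owning the final attribute.
from types import SimpleNamespace

def resolve_dotted_tensor(model, tensor_name):
    module = model
    if "." in tensor_name:
        *parents, tensor_name = tensor_name.split(".")
        for split in parents:
            module = getattr(module, split, None)
            if module is None:
                raise ValueError(f"no attribute {split} on the given model")
    return module, tensor_name

_toy = SimpleNamespace(encoder=SimpleNamespace(layer=SimpleNamespace(weight=1)))
assert resolve_dotted_tensor(_toy, "encoder.layer.weight") == (_toy.encoder.layer, "weight")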
def _UpperCAmelCase ( __A : Dict , __A : Tuple=None , __A : Any=None , __A : Tuple=None , __A : Union[str, Any]=False ):
for name, module in model.named_children():
if current_key_name is None:
a_ : Optional[Any] = []
current_key_name.append(_lowerCamelCase )
if (isinstance(_lowerCamelCase , nn.Linear ) or isinstance(_lowerCamelCase , _lowerCamelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(_lowerCamelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
a_ : List[str] = module.weight.shape
else:
a_ : Any = module.in_features
a_ : int = module.out_features
if quantization_config.quantization_method() == "llm_int8":
a_ : str = bnb.nn.LinearabitLt(
_lowerCamelCase , _lowerCamelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
a_ : Tuple = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
a_ : Optional[Any] = bnb.nn.Linearabit(
_lowerCamelCase , _lowerCamelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
a_ : str = True
# Store the module class in case we need to transpose the weight later
a_ : Tuple = type(_lowerCamelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowerCamelCase )
if len(list(module.children() ) ) > 0:
a_ : List[str] = _replace_with_bnb_linear(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , has_been_replaced=_lowerCamelCase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _UpperCAmelCase ( __A : Union[str, Any] , __A : Tuple=None , __A : int=None , __A : str=None ):
a_ : Optional[Any] = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
a_ : Any = _replace_with_bnb_linear(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def _UpperCAmelCase ( *__A : Dict , **__A : List[Any] ):
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , _lowerCamelCase , )
return replace_with_bnb_linear(*_lowerCamelCase , **_lowerCamelCase )
def _UpperCAmelCase ( *__A : Union[str, Any] , **__A : Optional[int] ):
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , _lowerCamelCase , )
return set_module_quantized_tensor_to_device(*_lowerCamelCase , **_lowerCamelCase )
def _UpperCAmelCase ( __A : Tuple ):
    a_ : int = deepcopy(_lowerCamelCase ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
a_ : List[str] = find_tied_parameters(_lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCamelCase , _lowerCamelCase ):
a_ : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
a_ : Optional[int] = sum(_lowerCamelCase , [] )
a_ : int = len(_lowerCamelCase ) > 0
# Check if it is a base model
a_ : Optional[int] = not hasattr(_lowerCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
a_ : Optional[int] = list(model.named_children() )
a_ : int = [list_modules[-1][0]]
# add last module together with tied weights
a_ : int = set(_lowerCamelCase ) - set(_lowerCamelCase )
a_ : Any = list(set(_lowerCamelCase ) ) + list(_lowerCamelCase )
# remove ".weight" from the keys
a_ : Dict = [".weight", ".bias"]
a_ : str = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
a_ : Any = name.replace(_lowerCamelCase , '''''' )
filtered_module_names.append(_lowerCamelCase )
return filtered_module_names
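# --- Illustrative sketch (assumption): the ".weight"/".bias" suffix stripping at the
# end of the function above, traced on a few example parameter names.
_filtered = []
for _name in ["lm_head.weight", "transformer.ln_f.bias", "score"]:
    for _suffix in [".weight", ".bias"]:
        if _suffix in _name:
            _name = _name.replace(_suffix, "")
    _filtered.append(_name)
assert _filtered == ["lm_head", "transformer.ln_f", "score"]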
| 704 |
'''simple docstring'''
import sys
__lowerCAmelCase = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _UpperCAmelCase ( __A : str ):
a_ : Tuple = 1
for digit in s:
product *= int(__A )
return product
def _UpperCAmelCase ( __A : str = N ):
a_ : Dict = -sys.maxsize - 1
a_ : Optional[int] = n[:13]
a_ : str = 13
while cur_index < len(__A ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
a_ : Tuple = substr[1:] + n[cur_index]
cur_index += 1
else:
a_ : Dict = max(__A , str_eval(__A ) )
a_ : List[str] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
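# --- Illustrative sketch (assumption): a self-contained brute-force cross-check of the
# sliding heuristic above; pass it the 1000-digit string defined at the top of this
# file. For the classic Project Euler 8 input the answer is 23514624000.
def brute_force_solution(digits, window=13):
    from math import prod
    return max(prod(int(d) for d in digits[i : i + window]) for i in range(len(digits) - window + 1))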
if __name__ == "__main__":
print(F"""{solution() = }""")
| 666 | 0 |
'''simple docstring'''
def _UpperCAmelCase ( __A : list[int] ):
if not numbers:
return 0
if not isinstance(__A , (list, tuple) ) or not all(
isinstance(__A , __A ) for number in numbers ):
raise ValueError('''numbers must be an iterable of integers''' )
a_ : str = numbers[0]
for i in range(1 , len(__A ) ):
# update the maximum and minimum subarray products
a_ : Any = numbers[i]
if number < 0:
a_ : Optional[int] = min_till_now, max_till_now
a_ : int = max(__A , max_till_now * number )
a_ : Optional[Any] = min(__A , min_till_now * number )
# update the maximum product found till now
a_ : int = max(__A , __A )
return max_prod
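# --- Illustrative sketch (assumption): a self-contained rendering of the recurrence
# above; on a negative number the running minimum and maximum swap before extending.
def max_subarray_product(numbers):
    max_till_now = min_till_now = max_prod = numbers[0]
    for number in numbers[1:]:
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        max_prod = max(max_prod, max_till_now)
    return max_prod

assert max_subarray_product([2, 3, -2, 4]) == 6
assert max_subarray_product([-2, 0, -1]) == 0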
| 705 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : list[int] ):
a_ : int = len(__A ) // 2
# choose the middle 3 elements
a_ : Dict = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
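# --- Illustrative sketch (assumption): a self-contained rendering of the divide-and-
# conquer peak search above, run on a bitonic list.
def find_peak(lst):
    m = len(lst) // 2
    three = lst[m - 1 : m + 2]
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    elif three[0] < three[2]:  # still increasing: recurse on the right half
        if len(lst[:m]) == 2:
            m -= 1
        return find_peak(lst[m:])
    else:  # decreasing: recurse on the left half
        if len(lst[:m]) == 2:
            m += 1
        return find_peak(lst[:m])

assert find_peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5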
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger('''transformers.models.speecht5''')
__lowerCAmelCase = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
__lowerCAmelCase = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
__lowerCAmelCase = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
__lowerCAmelCase = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
__lowerCAmelCase = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
__lowerCAmelCase = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
__lowerCAmelCase = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
__lowerCAmelCase = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
__lowerCAmelCase = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__lowerCAmelCase = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__lowerCAmelCase = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__lowerCAmelCase = []
__lowerCAmelCase = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
__lowerCAmelCase = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
__lowerCAmelCase = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
__lowerCAmelCase = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def _UpperCAmelCase ( __A : List[str] , __A : Optional[Any] , __A : Any , __A : Tuple , __A : Tuple ):
for attribute in key.split('''.''' ):
a_ : List[Any] = getattr(snake_case__ , snake_case__ )
if weight_type is not None:
a_ : Optional[Any] = getattr(snake_case__ , snake_case__ ).shape
else:
a_ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
a_ : Optional[Any] = value
elif weight_type == "weight_g":
a_ : List[str] = value
elif weight_type == "weight_v":
a_ : int = value
elif weight_type == "bias":
a_ : Tuple = value
elif weight_type == "running_mean":
a_ : Optional[Any] = value
elif weight_type == "running_var":
a_ : Optional[Any] = value
elif weight_type == "num_batches_tracked":
a_ : Optional[Any] = value
else:
a_ : int = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def _UpperCAmelCase ( __A : Any , __A : int ):
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
a_ : int = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
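# --- Illustrative sketch (assumption): the wildcard matching performed by the function
# above, checked against a couple of fairseq-style parameter names.
def matches_ignore_key(name, key):
    if key.endswith(".*"):
        return name.startswith(key[:-1])
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name

assert matches_ignore_key("text_decoder_prenet.embed_positions._float_tensor", "text_decoder_prenet.*")
assert not matches_ignore_key("encoder.layers.0.fc1", "decoder.layers.*.fc1")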
def _UpperCAmelCase ( __A : Dict , __A : int , __A : Any ):
a_ : Tuple = []
if task == "s2t":
a_ : int = hf_model.speechta.encoder.prenet.feature_encoder
a_ : Tuple = MAPPING_S2T
a_ : Union[str, Any] = IGNORE_KEYS_S2T
elif task == "t2s":
a_ : int = None
a_ : Union[str, Any] = MAPPING_T2S
a_ : List[str] = IGNORE_KEYS_T2S
elif task == "s2s":
a_ : List[Any] = hf_model.speechta.encoder.prenet.feature_encoder
a_ : Tuple = MAPPING_S2S
a_ : Tuple = IGNORE_KEYS_S2S
else:
raise ValueError(f'Unsupported task: {task}' )
for name, value in fairseq_dict.items():
if should_ignore(snake_case__ , snake_case__ ):
logger.info(f'{name} was ignored' )
continue
a_ : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == '''group''' , )
a_ : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
a_ : Union[str, Any] = key.split('''.*.''' )
if prefix in name and suffix in name:
a_ : Optional[int] = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
a_ : Union[str, Any] = True
if "*" in mapped_key:
a_ : int = name.split(snake_case__ )[0].split('''.''' )[-2]
a_ : str = mapped_key.replace('''*''' , snake_case__ )
if "weight_g" in name:
a_ : str = """weight_g"""
elif "weight_v" in name:
a_ : Dict = """weight_v"""
elif "bias" in name:
a_ : List[str] = """bias"""
elif "weight" in name:
a_ : Optional[int] = """weight"""
elif "running_mean" in name:
a_ : int = """running_mean"""
elif "running_var" in name:
a_ : int = """running_var"""
elif "num_batches_tracked" in name:
a_ : Union[str, Any] = """num_batches_tracked"""
else:
a_ : Dict = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(f'Unused weights: {unused_weights}' )
def _UpperCAmelCase ( __A : Union[str, Any] , __A : List[str] , __A : str , __A : Optional[int] , __A : Dict ):
a_ : Dict = full_name.split('''conv_layers.''' )[-1]
a_ : Dict = name.split('''.''' )
a_ : Union[str, Any] = int(items[0] )
a_ : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
a_ : Optional[int] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
a_ : str = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
a_ : List[str] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
a_ : Optional[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(snake_case__ )
@torch.no_grad()
def _UpperCAmelCase ( __A : Tuple , __A : int , __A : Any , __A : List[Any]=None , __A : Tuple=None , __A : Dict=None , ):
if config_path is not None:
a_ : List[Any] = SpeechTaConfig.from_pretrained(snake_case__ )
else:
a_ : List[Any] = SpeechTaConfig()
if task == "s2t":
a_ : Union[str, Any] = config.max_text_positions
a_ : Optional[Any] = SpeechTaForSpeechToText(snake_case__ )
elif task == "t2s":
a_ : List[Any] = 18_76
a_ : Tuple = 6_00
a_ : Union[str, Any] = config.max_speech_positions
a_ : Dict = SpeechTaForTextToSpeech(snake_case__ )
elif task == "s2s":
a_ : List[str] = 18_76
a_ : List[str] = config.max_speech_positions
a_ : Dict = SpeechTaForSpeechToSpeech(snake_case__ )
else:
raise ValueError(f'Unknown task name: {task}' )
if vocab_path:
a_ : str = SpeechTaTokenizer(snake_case__ , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. it includes the space before it
a_ : Tuple = AddedToken('''<mask>''' , lstrip=snake_case__ , rstrip=snake_case__ )
a_ : Tuple = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
a_ : List[Any] = SpeechTaFeatureExtractor()
a_ : Optional[Any] = SpeechTaProcessor(tokenizer=snake_case__ , feature_extractor=snake_case__ )
processor.save_pretrained(snake_case__ )
a_ : Optional[Any] = torch.load(snake_case__ )
recursively_load_weights(fairseq_checkpoint['''model'''] , snake_case__ , snake_case__ )
model.save_pretrained(snake_case__ )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(snake_case__ )
model.push_to_hub(snake_case__ )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__lowerCAmelCase = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 706 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = LongformerTokenizer
snake_case__ = True
snake_case__ = LongformerTokenizerFast
snake_case__ = True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Any = {'''unk_token''': '''<unk>'''}
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Any , **__SCREAMING_SNAKE_CASE : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
a_ : Union[str, Any] = '''lower newer'''
a_ : List[Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a_ : List[str] = '''lower newer'''
a_ : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a_ : Optional[int] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokens + [tokenizer.unk_token]
a_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Dict = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
a_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
a_ : str = self.get_tokenizer()
a_ : int = '''Encode this sequence.'''
a_ : List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
a_ : Optional[Any] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = '''Encode <mask> sequence'''
a_ : List[str] = '''Encode <mask>sequence'''
a_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : str = '''A, <mask> AllenNLP sentence.'''
a_ : List[Any] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
a_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
a_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a_ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Dict = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
a_ : Union[str, Any] = f'{text_of_1_token} {text_of_1_token}'
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Union[str, Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a_ : str = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
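# --- Illustrative sketch (assumption): the "\u0120" marker in the toy vocab above is
# the byte-level BPE rendering of a leading space ("Ġ"); GPT-2-style byte encoders
# shift the low control-range bytes (0x00-0x20) up by 0x100 to printable code points.
_space_byte = " ".encode("utf-8")[0]  # 0x20
assert chr(_space_byte + 0x100) == "\u0120"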
| 666 | 0 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def _UpperCAmelCase ( __A : Optional[int] , __A : Dict , __A : Union[str, Any]=[] ):
a_ : Tuple = size[0] - overlap_pixels * 2
a_ : Optional[int] = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
a_ : str = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55
a_ : int = np.pad(__snake_case , mode='''linear_ramp''' , pad_width=__snake_case , end_values=0 )
if "l" in remove_borders:
a_ : Tuple = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
a_ : List[str] = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
a_ : Any = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
a_ : Tuple = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def _UpperCAmelCase ( __A : List[Any] , __A : List[Any] , __A : List[str] ):
return max(__snake_case , min(__snake_case , __snake_case ) )
def _UpperCAmelCase ( __A : Any , __A : List[str] , __A : str ):
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def _UpperCAmelCase ( __A : Any , __A : int , __A : Optional[int] ):
a_ : List[Any] = list(__snake_case )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
a_ : List[str] = clamp_rect(__snake_case , [0, 0] , [image_size[0], image_size[1]] )
return rect
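# --- Illustrative sketch (assumption): the overlap arithmetic above, traced for a
# 256x256 image with 128-px tiles and a 32-px border; coordinates clamp to the image.
def _add_overlap(rect, overlap, image_size):
    _clamp = lambda v, lo, hi: max(lo, min(hi, v))
    x0, y0, x1, y1 = rect
    return (
        _clamp(x0 - overlap, 0, image_size[0]),
        _clamp(y0 - overlap, 0, image_size[1]),
        _clamp(x1 + overlap, 0, image_size[0]),
        _clamp(y1 + overlap, 0, image_size[1]),
    )

assert _add_overlap((128, 0, 256, 128), 32, (256, 256)) == (96, 0, 256, 160)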
def _UpperCAmelCase ( __A : Union[str, Any] , __A : Optional[Any] , __A : str , __A : Optional[int] ):
a_ : Tuple = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(__snake_case , (original_slice, 0) )
return result
def _UpperCAmelCase ( __A : Union[str, Any] , __A : Tuple ):
a_ : Tuple = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
a_ : int = tile.crop(__snake_case )
return tile
def _UpperCAmelCase ( __A : Optional[int] , __A : Union[str, Any] ):
a_ : Any = n % d
return n - divisor
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] = 350 , ) -> int:
super().__init__(
vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , unet=__lowerCAmelCase , low_res_scheduler=__lowerCAmelCase , scheduler=__lowerCAmelCase , max_noise_level=__lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]:
torch.manual_seed(0 )
a_ : Dict = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
a_ : int = add_overlap_rect(__lowerCAmelCase , __lowerCAmelCase , image.size )
a_ : str = image.crop(__lowerCAmelCase )
a_ : List[str] = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
a_ : Optional[Any] = translated_slice_x - (original_image_slice / 2)
a_ : Optional[Any] = max(0 , __lowerCAmelCase )
a_ : List[Any] = squeeze_tile(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
a_ : str = to_input.size
a_ : int = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
a_ : Union[str, Any] = super(__lowerCAmelCase , self ).__call__(image=__lowerCAmelCase , **__lowerCAmelCase ).images[0]
a_ : Union[str, Any] = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
a_ : Optional[Any] = unsqueeze_tile(__lowerCAmelCase , __lowerCAmelCase )
a_ : Optional[Any] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
a_ : List[str] = []
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
a_ : Optional[int] = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__lowerCAmelCase ) , mode='''L''' , )
final_image.paste(
__lowerCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __lowerCAmelCase )
@torch.no_grad()
def __call__( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] = 75 , __SCREAMING_SNAKE_CASE : Dict = 9.0 , __SCREAMING_SNAKE_CASE : List[str] = 50 , __SCREAMING_SNAKE_CASE : Tuple = None , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : Any = 0.0 , __SCREAMING_SNAKE_CASE : Tuple = None , __SCREAMING_SNAKE_CASE : Optional[Any] = None , __SCREAMING_SNAKE_CASE : Tuple = None , __SCREAMING_SNAKE_CASE : Any = 1 , __SCREAMING_SNAKE_CASE : List[str] = 128 , __SCREAMING_SNAKE_CASE : str = 32 , __SCREAMING_SNAKE_CASE : Tuple = 32 , ) -> Optional[Any]:
a_ : List[str] = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) )
a_ : Tuple = math.ceil(image.size[0] / tile_size )
a_ : Tuple = math.ceil(image.size[1] / tile_size )
a_ : str = tcx * tcy
a_ : Union[str, Any] = 0
for y in range(__lowerCAmelCase ):
for x in range(__lowerCAmelCase ):
self._process_tile(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , prompt=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , noise_level=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , num_images_per_prompt=__lowerCAmelCase , eta=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , )
current_count += 1
if callback is not None:
callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
return final_image
def _UpperCAmelCase ( ):
a_ : int = '''stabilityai/stable-diffusion-x4-upscaler'''
a_ : Any = StableDiffusionTiledUpscalePipeline.from_pretrained(__snake_case , revision='''fp16''' , torch_dtype=torch.floataa )
a_ : Tuple = pipe.to('''cuda''' )
a_ : Tuple = Image.open('''../../docs/source/imgs/diffusers_library.jpg''' )
def callback(__A : Optional[Any] ):
print(f'progress: {obj["progress"]:.4f}' )
obj["image"].save('''diffusers_library_progress.jpg''' )
a_ : Any = pipe(image=__snake_case , prompt='''Black font, white background, vector''' , noise_level=40 , callback=__snake_case )
final_image.save('''diffusers_library.jpg''' )
if __name__ == "__main__":
main()
| 707 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__lowerCAmelCase = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
__lowerCAmelCase = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
__lowerCAmelCase = '▁'
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict="<s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="<s>" , __SCREAMING_SNAKE_CASE : Dict="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Tuple="<mask>" , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
a_ : Tuple = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
a_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
a_ : Tuple = vocab_file
a_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
a_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
a_ : Any = len(self.sp_model ) - 1
a_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ : List[str] = [self.cls_token_id]
a_ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
a_ : List[str] = [self.sep_token_id]
a_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : int = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a_ : Optional[int] = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
a_ : Dict = []
a_ : List[Any] = ''''''
a_ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
a_ : Dict = True
a_ : Optional[Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
a_ : Tuple = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def __getstate__( self : Dict ) -> int:
a_ : Dict = self.__dict__.copy()
a_ : List[str] = None
return state
def __setstate__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
a_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a_ : Union[str, Any] = {}
a_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
a_ : Union[str, Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
a_ : Any = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
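# --- Illustrative sketch (assumption): the special-token layout produced by
# build_inputs_with_special_tokens above, using the fairseq id table from __init__
# (<s> = 0, </s> = 2).
_cls_id, _sep_id = 0, 2
_seq_a, _seq_b = [11, 12], [21]
assert [_cls_id] + _seq_a + [_sep_id] == [0, 11, 12, 2]  # <s> A </s>
assert [_cls_id] + _seq_a + [_sep_id, _sep_id] + _seq_b + [_sep_id] == [0, 11, 12, 2, 2, 21, 2]  # <s> A </s></s> B </s>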
| 666 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
__lowerCAmelCase = parser.parse_args()
if args.model_type == "roberta":
__lowerCAmelCase = RobertaForMaskedLM.from_pretrained(args.model_name)
__lowerCAmelCase = "roberta"
elif args.model_type == "gpt2":
__lowerCAmelCase = GPTaLMHeadModel.from_pretrained(args.model_name)
__lowerCAmelCase = "transformer"
__lowerCAmelCase = model.state_dict()
__lowerCAmelCase = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
__lowerCAmelCase = state_dict[F"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
__lowerCAmelCase = F"""{prefix}.embeddings.{w}.weight"""
__lowerCAmelCase = state_dict[param_name]
for w in ["weight", "bias"]:
__lowerCAmelCase = F"""{prefix}.embeddings.LayerNorm.{w}"""
__lowerCAmelCase = state_dict[param_name]
# Transformer Blocks #
__lowerCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
__lowerCAmelCase = state_dict[
F"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
__lowerCAmelCase = state_dict[F"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
__lowerCAmelCase = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
__lowerCAmelCase = state_dict[F"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
__lowerCAmelCase = state_dict[F"""lm_head.dense.{w}"""]
__lowerCAmelCase = state_dict[F"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
__lowerCAmelCase = state_dict[F"""{prefix}.ln_f.{w}"""]
__lowerCAmelCase = state_dict["lm_head.weight"]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
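# --- Illustrative sketch (assumption): the teacher-to-student layer selection used
# above, keeping 6 of 12 teacher layers; std_idx renumbers them densely.
_teacher_layers = [0, 2, 4, 7, 9, 11]
_mapping = {teacher: student for student, teacher in enumerate(_teacher_layers)}
assert _mapping[7] == 3  # teacher layer 7 becomes student layer 3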
| 708 |
'''simple docstring'''
def _UpperCAmelCase ( __A : str , __A : str ):
def get_matched_characters(__A : str , __A : str ) -> str:
a_ : Union[str, Any] = []
a_ : int = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
a_ : Any = int(max(0 , i - limit ) )
a_ : Union[str, Any] = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(__A )
a_ : Any = f'{_stra[0:_stra.index(__A )]} {_stra[_stra.index(__A ) + 1:]}'
return "".join(__A )
# matching characters
a_ : Optional[Any] = get_matched_characters(__A , __A )
a_ : int = get_matched_characters(__A , __A )
a_ : Any = len(__A )
# transposition
a_ : List[Any] = (
len([(ca, ca) for ca, ca in zip(__A , __A ) if ca != ca] ) // 2
)
if not match_count:
a_ : Dict = 0.0
else:
a_ : Optional[int] = (
1
/ 3
* (
match_count / len(__A )
+ match_count / len(__A )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
a_ : List[str] = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
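    # Sanity check on the textbook pair MARTHA / MARHTA: 6 matches, 1 transposition,
    # so Jaro = (6/6 + 6/6 + 5/6) / 3 ≈ 0.9444, and with a common prefix of length 3
    # Jaro-Winkler = 0.9444 + 3 * 0.1 * (1 - 0.9444) ≈ 0.9611:
    # print(jaro_winkler('MARTHA', 'MARHTA'))  # expected ≈ 0.9611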
| 666 | 0 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
__lowerCAmelCase = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
__lowerCAmelCase = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
__lowerCAmelCase = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
__lowerCAmelCase = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
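# A hypothetical invocation (script name assumed; unknown extra args after the
# known flags are forwarded verbatim to the chosen example script):
# python run_on_remote.py --instance V100:1 --provider cheapest \
#     --example pytorch/text-generation/run_generation.py \
#     --model_type=gpt2 --model_name_or_path=gpt2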
| 709 |
'''simple docstring'''
import torch
from transformers import AutoModel
class SCREAMING_SNAKE_CASE ( torch.nn.Module ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int="sayef/fsner-bert-base-uncased" ) -> str:
super(__SCREAMING_SNAKE_CASE , self ).__init__()
a_ : str = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
a_ : Dict = torch.nn.Softmax(dim=1 )
def SCREAMING_SNAKE_CASE ( self : str , **__SCREAMING_SNAKE_CASE : int ) -> str:
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=1 ) -> Dict:
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
a_ : Dict = W_supports['''sizes'''].tolist()
a_ : Tuple = W_supports['''start_token_id'''].item()
a_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
a_ : int = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Any = self.BERT(**__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = None
a_ : Tuple = None
a_ : List[str] = W_supports['''input_ids'''] == start_token_id
a_ : Dict = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
a_ : str = 0
else:
a_ : str = support_sizes[i - 1]
a_ : Union[str, Any] = S[s : s + size][start_token_masks[s : s + size]]
a_ : Tuple = S[s : s + size][end_token_masks[s : s + size]]
a_ : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
a_ : Optional[Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
a_ : Any = torch.vstack((p_starts, p_start) )
a_ : Dict = torch.vstack((p_ends, p_end) )
else:
a_ : Optional[int] = p_start
a_ : List[Any] = p_end
return p_starts, p_ends
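# A minimal usage sketch (the module above is stored under an anonymized name;
# upstream it is `FSNERModel`). `W_query` and `W_supports` are hypothetical
# dicts of tokenizer outputs, with `W_supports` also carrying the "sizes",
# "start_token_id" and "end_token_id" entries consumed above:
# model = FSNERModel()
# p_starts, p_ends = model(W_query, W_supports)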
| 666 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
snake_case__ = "rwkv"
snake_case__ = {"max_position_embeddings": "context_length"}
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]=5_0277 , __SCREAMING_SNAKE_CASE : str=1024 , __SCREAMING_SNAKE_CASE : Union[str, Any]=4096 , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=1e-5 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , __SCREAMING_SNAKE_CASE : Optional[Any]=0 , __SCREAMING_SNAKE_CASE : Dict=6 , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Tuple=True , **__SCREAMING_SNAKE_CASE : List[str] , ) -> List[Any]:
a_ : int = vocab_size
a_ : Tuple = context_length
a_ : str = hidden_size
a_ : str = num_hidden_layers
a_ : int = attention_hidden_size if attention_hidden_size is not None else hidden_size
a_ : List[Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size
a_ : Any = layer_norm_epsilon
a_ : List[str] = rescale_every
a_ : int = use_cache
a_ : Optional[Any] = bos_token_id
a_ : Union[str, Any] = eos_token_id
super().__init__(
tie_word_embeddings=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
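# A minimal instantiation sketch (the class above is stored under an anonymized
# name; upstream it is `RwkvConfig`). Per __init__, attention_hidden_size
# defaults to hidden_size and intermediate_size to 4 * hidden_size:
# config = RwkvConfig(vocab_size=50277, context_length=1024, hidden_size=768)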
| 710 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
snake_case__ = Features({"image": Image()} )
snake_case__ = Features({"labels": ClassLabel} )
snake_case__ = "image"
snake_case__ = "labels"
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
a_ : Optional[int] = copy.deepcopy(self )
a_ : int = self.label_schema.copy()
a_ : Tuple = features[self.label_column]
a_ : str = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
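# A minimal usage sketch (upstream names; the alignment method is anonymized
# above but corresponds to `align_with_features`):
# features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# task = ImageClassification().align_with_features(features)
# task.label_schema["labels"] now carries the concrete ClassLabel with its names.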
| 666 | 0 |
'''simple docstring'''
def _UpperCAmelCase ( __A : Optional[Any] ):
a_ : Dict = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _UpperCAmelCase ( __A : Optional[Any] = 1_00 ):
a_ : Union[str, Any] = 1
a_ : List[str] = 2
for i in range(2 , max_n + 1 ):
a_ : Union[str, Any] = pre_numerator
a_ : Optional[Any] = 2 * i // 3 if i % 3 == 0 else 1
a_ : int = cur_numerator
a_ : Tuple = e_cont * pre_numerator + temp
return sum_digits(_snake_case )
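# Why the loop works: e has the continued fraction [2; 1, 2, 1, 1, 4, 1, 1, 6, ...],
# so the i-th partial quotient is 2 * i / 3 when i is divisible by 3 and 1
# otherwise, and convergent numerators follow h(i) = a(i) * h(i - 1) + h(i - 2),
# which is exactly the `e_cont * pre_numerator + temp` update above.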
if __name__ == "__main__":
print(F"""{solution() = }""")
| 711 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : tuple[int, int] , __A : int ):
a_ , a_ : List[str] = position
a_ : Optional[int] = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
a_ : Any = []
for position in positions:
a_ , a_ : Dict = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(__A )
return permissible_positions
def _UpperCAmelCase ( __A : list[list[int]] ):
return not any(elem == 0 for row in board for elem in row )
def _UpperCAmelCase ( __A : list[list[int]] , __A : tuple[int, int] , __A : int ):
if is_complete(__A ):
return True
for position in get_valid_pos(__A , len(__A ) ):
a_ , a_ : Dict = position
if board[y][x] == 0:
a_ : Optional[Any] = curr + 1
if open_knight_tour_helper(__A , __A , curr + 1 ):
return True
a_ : Tuple = 0
return False
def _UpperCAmelCase ( __A : int ):
a_ : List[str] = [[0 for i in range(__A )] for j in range(__A )]
for i in range(__A ):
for j in range(__A ):
a_ : Optional[Any] = 1
if open_knight_tour_helper(__A , (i, j) , 1 ):
return board
a_ : Union[str, Any] = 0
    a_ : Dict = f'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
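    # A quick usage sketch: a 5x5 board admits an open knight's tour, so
    # open_knight_tour(5) returns a grid whose entries 1..25 trace the tour,
    # while open_knight_tour(4) raises the ValueError above (no 4x4 tour exists).
    # print(open_knight_tour(5))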
| 666 | 0 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__lowerCAmelCase = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__lowerCAmelCase = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__lowerCAmelCase = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def _UpperCAmelCase ( __A : List[Any] , __A : Dict ):
a_ : int = len([g for position, g in enumerate(_lowercase ) if g == main_target[position]] )
return (item, float(_lowercase ))
def _UpperCAmelCase ( __A : str , __A : List[Any] ):
a_ : Optional[Any] = random.randint(0 , len(_lowercase ) - 1 )
a_ : List[str] = parent_a[:random_slice] + parent_a[random_slice:]
a_ : Optional[int] = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _UpperCAmelCase ( __A : Optional[Any] , __A : Any ):
a_ : Any = list(_lowercase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
a_ : List[Any] = random.choice(_lowercase )
return "".join(_lowercase )
def _UpperCAmelCase ( __A : Tuple , __A : Dict , __A : Optional[Any] , ):
a_ : List[str] = []
# Generate more children proportionally to the fitness score.
a_ : Optional[int] = int(parent_a[1] * 1_00 ) + 1
a_ : Tuple = 10 if child_n >= 10 else child_n
for _ in range(_lowercase ):
a_ : Optional[int] = population_score[random.randint(0 , _lowercase )][0]
a_ : Optional[Any] = crossover(parent_a[0] , _lowercase )
# Append new string to the population list.
pop.append(mutate(_lowercase , _lowercase ) )
pop.append(mutate(_lowercase , _lowercase ) )
return pop
def _UpperCAmelCase ( __A : Optional[int] , __A : Optional[Any] , __A : List[str] = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
a_ : List[Any] = f'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_lowercase )
# Verify that the target contains no genes besides the ones inside genes variable.
a_ : str = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
a_ : List[Any] = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_lowercase )
# Generate random starting population.
a_ : Union[str, Any] = []
for _ in range(_lowercase ):
population.append(''''''.join([random.choice(_lowercase ) for i in range(len(_lowercase ) )] ) )
# Just some logs to know what the algorithms is doing.
a_ : Dict = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_lowercase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
a_ : List[Any] = [evaluate(_lowercase , _lowercase ) for item in population]
# Check if there is a matching evolution.
        a_ : Any = sorted(_lowercase , key=lambda x : x[1] , reverse=_lowercase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'\nGeneration: {generation}'
f'\nTotal Population:{total_population}'
f'\nBest score: {population_score[0][1]}'
f'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
a_ : Union[str, Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_lowercase )
# Normalize population score to be between 0 and 1.
a_ : List[Any] = [
(item, score / len(_lowercase )) for item, score in population_score
]
# This is selection
for i in range(_lowercase ):
population.extend(select(population_score[int(_lowercase )] , _lowercase , _lowercase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(_lowercase ) > N_POPULATION:
break
if __name__ == "__main__":
__lowerCAmelCase = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
__lowerCAmelCase = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
__lowerCAmelCase = basic(target_str, genes_list)
print(
F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 712 |
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 666 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "yolos"
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Any=768 , __SCREAMING_SNAKE_CASE : List[str]=12 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : Tuple=3072 , __SCREAMING_SNAKE_CASE : int="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.0 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Tuple=1e-12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=[512, 864] , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Dict=3 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : int=100 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=1 , __SCREAMING_SNAKE_CASE : List[Any]=5 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=5 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Any=0.1 , **__SCREAMING_SNAKE_CASE : str , ) -> Optional[Any]:
        super().__init__(**__SCREAMING_SNAKE_CASE )
a_ : int = hidden_size
a_ : List[Any] = num_hidden_layers
a_ : Optional[int] = num_attention_heads
a_ : Tuple = intermediate_size
a_ : Union[str, Any] = hidden_act
a_ : int = hidden_dropout_prob
a_ : Any = attention_probs_dropout_prob
a_ : List[Any] = initializer_range
a_ : List[str] = layer_norm_eps
a_ : int = image_size
a_ : Optional[int] = patch_size
a_ : List[str] = num_channels
a_ : Optional[int] = qkv_bias
a_ : Optional[int] = num_detection_tokens
a_ : Dict = use_mid_position_embeddings
a_ : int = auxiliary_loss
# Hungarian matcher
a_ : Any = class_cost
a_ : Tuple = bbox_cost
a_ : Dict = giou_cost
# Loss coefficients
a_ : int = bbox_loss_coefficient
a_ : Union[str, Any] = giou_loss_coefficient
a_ : Dict = eos_coefficient
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = version.parse("1.11" )
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float:
return 1e-4
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return 12
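# A minimal export-config sketch (hypothetical names; upstream these classes
# are `YolosConfig` and `YolosOnnxConfig`):
# onnx_config = YolosOnnxConfig(YolosConfig())
# onnx_config.inputs               # OrderedDict mapping "pixel_values" to its dynamic axes
# onnx_config.atol_for_validation  # 1e-4, per the property above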
| 713 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def _UpperCAmelCase ( __A : str , __A : dict ):
a_ : Tuple = BeautifulSoup(requests.get(__A , params=__A ).content , '''html.parser''' )
a_ : List[str] = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
a_ : List[str] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
__lowerCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 666 | 0 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( __A : Tuple , __A : Optional[Any] , __A : List[str] ):
return params[f'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def _UpperCAmelCase ( __A : Dict , __A : Union[str, Any] , __A : Dict , __A : Optional[int]="attention" ):
a_ : Tuple = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
a_ : Tuple = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
a_ : str = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
a_ : Dict = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
a_ : Tuple = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
a_ : Any = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
a_ : Dict = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
a_ : List[str] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def _UpperCAmelCase ( __A : Dict , __A : str , __A : Optional[int] , __A : Dict=False ):
if split_mlp_wi:
a_ : List[str] = params[f'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
a_ : List[Any] = params[f'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
a_ : Any = (wi_a, wi_a)
else:
a_ : str = params[f'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
a_ : Optional[Any] = params[f'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def _UpperCAmelCase ( __A : str , __A : int , __A : Any , __A : Dict ):
return params[f'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def _UpperCAmelCase ( __A : List[str] , *, __A : List[str] , __A : Any , __A : List[Any] = False ):
a_ : List[Any] = traverse_util.flatten_dict(variables['''target'''] )
a_ : Optional[int] = {"""/""".join(__A ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
a_ : Optional[int] = """encoder/encoder/mlp/wi_0/kernel""" in old
print('''Split MLP:''' , __A )
a_ : Tuple = collections.OrderedDict()
# Shared embeddings.
a_ : List[Any] = old["""token_embedder/embedding"""]
# Encoder.
for i in range(__A ):
# Block i, layer 0 (Self Attention).
a_ : int = tax_layer_norm_lookup(__A , __A , '''encoder''' , '''pre_attention_layer_norm''' )
a_ : Dict = tax_attention_lookup(__A , __A , '''encoder''' , '''attention''' )
a_ : List[Any] = layer_norm
a_ : Optional[Any] = k.T
a_ : Tuple = o.T
a_ : Any = q.T
a_ : Optional[int] = v.T
# Block i, layer 1 (MLP).
a_ : Optional[Any] = tax_layer_norm_lookup(__A , __A , '''encoder''' , '''pre_mlp_layer_norm''' )
a_ : Tuple = tax_mlp_lookup(__A , __A , '''encoder''' , __A )
a_ : List[str] = layer_norm
if split_mlp_wi:
a_ : Dict = wi[0].T
a_ : Optional[int] = wi[1].T
else:
a_ : Optional[Any] = wi.T
a_ : Tuple = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
a_ : List[str] = tax_relpos_bias_lookup(
__A , __A , '''encoder''' ).T
a_ : List[Any] = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
a_ : List[Any] = tax_relpos_bias_lookup(
__A , 0 , '''encoder''' ).T
a_ : Any = tax_relpos_bias_lookup(
__A , 0 , '''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(__A ):
# Block i, layer 0 (Self Attention).
a_ : int = tax_layer_norm_lookup(__A , __A , '''decoder''' , '''pre_self_attention_layer_norm''' )
a_ : Optional[int] = tax_attention_lookup(__A , __A , '''decoder''' , '''self_attention''' )
a_ : Dict = layer_norm
a_ : str = k.T
a_ : Optional[Any] = o.T
a_ : Union[str, Any] = q.T
a_ : Optional[Any] = v.T
# Block i, layer 1 (Cross Attention).
a_ : Optional[int] = tax_layer_norm_lookup(__A , __A , '''decoder''' , '''pre_cross_attention_layer_norm''' )
a_ : Optional[Any] = tax_attention_lookup(__A , __A , '''decoder''' , '''encoder_decoder_attention''' )
a_ : List[Any] = layer_norm
a_ : Union[str, Any] = k.T
a_ : Tuple = o.T
a_ : List[Any] = q.T
a_ : Dict = v.T
# Block i, layer 2 (MLP).
a_ : int = tax_layer_norm_lookup(__A , __A , '''decoder''' , '''pre_mlp_layer_norm''' )
a_ : Union[str, Any] = tax_mlp_lookup(__A , __A , '''decoder''' , __A )
a_ : Optional[Any] = layer_norm
if split_mlp_wi:
a_ : int = wi[0].T
a_ : Dict = wi[1].T
else:
a_ : List[Any] = wi.T
a_ : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
a_ : Any = tax_relpos_bias_lookup(__A , __A , '''decoder''' ).T
a_ : str = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
a_ : Dict = old["""decoder/logits_dense/kernel"""].T
return new
def _UpperCAmelCase ( __A : List[str] , __A : str ):
a_ : List[str] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
a_ : Union[str, Any] = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
a_ : int = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
a_ : Union[str, Any] = state_dict["""shared.weight"""]
return state_dict
def _UpperCAmelCase ( __A : List[Any] , __A : str , __A : Union[str, Any] , __A : Optional[Any] , __A : str ):
a_ : str = checkpoints.load_tax_checkpoint(__A )
a_ : Dict = convert_tax_to_pytorch(
__A , num_layers=config.num_layers , is_encoder_only=__A , scalable_attention=__A )
a_ : Union[str, Any] = make_state_dict(__A , __A )
model.load_state_dict(__A , strict=__A )
def _UpperCAmelCase ( __A : Tuple , __A : Any , __A : str , __A : Optional[Any] = False , __A : Dict = False , ):
a_ : Dict = MTaConfig.from_json_file(__A )
print(f'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
a_ : Dict = UMTaEncoderModel(__A )
else:
a_ : Dict = UMTaForConditionalGeneration(__A )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__A , __A , __A , __A , __A )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__A )
# Verify that we can load the checkpoint.
model.from_pretrained(__A )
print('''Done''' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
__lowerCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
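# A hypothetical invocation (script name assumed; the paths are placeholders):
# python convert_t5x_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/output \
#     --scalable_attention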
| 714 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__lowerCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def _UpperCAmelCase ( __A : str , __A : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
a_ : Tuple = XLMProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Optional[Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
else:
a_ : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Any = ProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
a_ : str = ['''key_proj''', '''value_proj''', '''query_proj''']
a_ : Tuple = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
a_ : List[str] = key.split('''.''' )
if attributes[0] == "lm_head":
a_ : List[str] = prophet
a_ : Dict = prophet_old
else:
a_ : str = prophet.prophetnet
a_ : int = prophet_old.model
a_ : str = False
for attribute in attributes:
if attribute in mapping:
a_ : Dict = mapping[attribute]
if not hasattr(__A , __A ) and len(__A ) > 0:
a_ : List[str] = attribute
elif hasattr(__A , __A ):
a_ : Union[str, Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
a_ : Tuple = old_model.weight
logger.info(f'{attribute} is initialized.' )
a_ : Union[str, Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
a_ : Union[str, Any] = old_model.bias
logger.info(f'{attribute} is initialized' )
a_ : Dict = True
break
elif attribute in special_keys and hasattr(__A , '''in_proj_weight''' ):
a_ : Tuple = old_model.in_proj_weight.shape[0] // 3
a_ : Any = getattr(__A , __A )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
a_ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
a_ : Optional[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
a_ : List[Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
a_ : Optional[int] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
a_ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
a_ : Any = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
a_ : Dict = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
a_ : Union[str, Any] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
a_ : Optional[Any] = True
break
if attribute.isdigit():
a_ : Union[str, Any] = model[int(__A )]
a_ : str = old_model[int(__A )]
else:
a_ : Tuple = getattr(__A , __A )
if old_attribute == "":
a_ : List[str] = old_model
else:
if not hasattr(__A , __A ):
raise ValueError(f'{old_model} does not have {old_attribute}' )
a_ : Optional[Any] = getattr(__A , __A )
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!' )
print(f'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(__A )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 666 | 0 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class SCREAMING_SNAKE_CASE ( lowercase__ ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
a_ : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowercase , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__lowercase , '''num_attention_heads''' ) )
class SCREAMING_SNAKE_CASE :
def __init__( self : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict=13 , __SCREAMING_SNAKE_CASE : str=64 , __SCREAMING_SNAKE_CASE : str=3 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : List[str]=1 , __SCREAMING_SNAKE_CASE : int=16 , __SCREAMING_SNAKE_CASE : Optional[int]=[128, 256, 384] , __SCREAMING_SNAKE_CASE : List[Any]=[4, 6, 8] , __SCREAMING_SNAKE_CASE : str=[2, 3, 4] , __SCREAMING_SNAKE_CASE : List[Any]=[16, 16, 16] , __SCREAMING_SNAKE_CASE : int=0 , __SCREAMING_SNAKE_CASE : str=[2, 2, 2] , __SCREAMING_SNAKE_CASE : Tuple=[2, 2, 2] , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : List[str]=2 , ) -> Tuple:
a_ : List[Any] = parent
a_ : Tuple = batch_size
a_ : Tuple = image_size
a_ : List[Any] = num_channels
a_ : Any = kernel_size
a_ : List[Any] = stride
a_ : Optional[int] = padding
a_ : Optional[int] = hidden_sizes
a_ : Any = num_attention_heads
a_ : int = depths
a_ : List[Any] = key_dim
a_ : Optional[int] = drop_path_rate
a_ : Tuple = patch_size
a_ : Union[str, Any] = attention_ratio
a_ : List[str] = mlp_ratio
a_ : str = initializer_range
a_ : Dict = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
a_ : Optional[int] = is_training
a_ : Tuple = use_labels
a_ : Optional[int] = num_labels
a_ : str = initializer_range
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
a_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ : Union[str, Any] = None
if self.use_labels:
a_ : int = ids_tensor([self.batch_size] , self.num_labels )
a_ : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
a_ : List[Any] = LevitModel(config=__lowercase )
model.to(__lowercase )
model.eval()
a_ : Optional[Any] = model(__lowercase )
a_ : Optional[Any] = (self.image_size, self.image_size)
a_ , a_ : Union[str, Any] = image_size[0], image_size[1]
for _ in range(4 ):
a_ : str = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
a_ : Any = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]:
a_ : Union[str, Any] = self.num_labels
a_ : Optional[int] = LevitForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
a_ : List[Any] = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
a_ : str = self.prepare_config_and_inputs()
a_ , a_ , a_ : Dict = config_and_inputs
a_ : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
snake_case__ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
snake_case__ = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : Tuple = LevitModelTester(self )
a_ : List[Any] = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
a_ , a_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Optional[int] = model_class(__lowercase )
a_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : Any = [*signature.parameters.keys()]
a_ : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
def check_hidden_states_output(__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] ):
a_ : str = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
a_ : List[Any] = model(**self._prepare_for_class(__lowercase , __lowercase ) )
a_ : List[Any] = outputs.hidden_states
a_ : Optional[int] = len(self.model_tester.depths ) + 1
self.assertEqual(len(__lowercase ) , __lowercase )
a_ : Union[str, Any] = (self.model_tester.image_size, self.model_tester.image_size)
a_ , a_ : Union[str, Any] = image_size[0], image_size[1]
for _ in range(4 ):
a_ : str = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
a_ : Union[str, Any] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
a_ , a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : List[str] = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ : Union[str, Any] = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict=False ) -> Any:
a_ : Optional[int] = super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
a_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
if not self.model_tester.is_training:
return
a_ , a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a_ : str = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowercase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
a_ : Optional[Any] = model_class(__lowercase )
model.to(__lowercase )
model.train()
a_ : List[str] = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
a_ : Tuple = model(**__lowercase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
a_ , a_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
a_ : Dict = False
a_ : List[str] = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowercase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
a_ : Tuple = model_class(__lowercase )
model.gradient_checkpointing_enable()
model.to(__lowercase )
model.train()
a_ : Optional[int] = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
a_ : Any = model(**__lowercase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
a_ , a_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
a_ : int = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowercase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}' ):
a_ : List[str] = problem_type['''title''']
a_ : str = problem_type['''num_labels''']
a_ : Union[str, Any] = model_class(__lowercase )
model.to(__lowercase )
model.train()
a_ : Any = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
if problem_type["num_labels"] > 1:
a_ : Any = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
a_ : str = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowercase ) as warning_list:
a_ : List[str] = model(**__lowercase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : Any = LevitModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def _UpperCAmelCase ( ):
a_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
a_ : Optional[Any] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__lowercase )
a_ : Optional[Any] = self.default_image_processor
a_ : int = prepare_img()
a_ : Optional[int] = image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
a_ : Any = model(**__lowercase )
# verify the logits
a_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
a_ : Any = torch.tensor([1.0448, -0.3745, -1.8317] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
| 715 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__lowerCAmelCase = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
__lowerCAmelCase = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Dict=False , ) -> str:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
a_ : Optional[Any] = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in predictions] )
a_ : int = np.array([re.sub(__SCREAMING_SNAKE_CASE , '''''' , __SCREAMING_SNAKE_CASE ) for x in references] )
else:
a_ : List[str] = np.asarray(__SCREAMING_SNAKE_CASE )
a_ : Any = np.asarray(__SCREAMING_SNAKE_CASE )
if ignore_case:
a_ : List[str] = np.char.lower(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.char.lower(__SCREAMING_SNAKE_CASE )
if ignore_punctuation:
a_ : Any = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
a_ : Union[str, Any] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : int = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
if ignore_numbers:
a_ : int = string.digits.maketrans('''''' , '''''' , string.digits )
a_ : Optional[int] = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : Dict = np.char.translate(__SCREAMING_SNAKE_CASE , table=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = predictions == references
return {"exact_match": np.mean(__SCREAMING_SNAKE_CASE ) * 100}
| 666 | 0 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def _UpperCAmelCase ( __A : Optional[Any] ):
if hor == 1_28:
a_ : int = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
a_ : Tuple = (32, 1_28, 2_56)
a_ : List[str] = ("UpResnetBlock1D", "UpResnetBlock1D")
elif hor == 32:
a_ : Union[str, Any] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
a_ : str = (32, 64, 1_28, 2_56)
a_ : Union[str, Any] = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
a_ : List[Any] = torch.load(f'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' )
a_ : Optional[int] = model.state_dict()
a_ : Union[str, Any] = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 6_55_36,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
a_ : int = UNetaDModel(**_lowerCAmelCase )
print(f'length of state dict: {len(state_dict.keys() )}' )
print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
a_ : List[Any] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
a_ : Tuple = state_dict.pop(_lowerCAmelCase )
hf_value_function.load_state_dict(_lowerCAmelCase )
torch.save(hf_value_function.state_dict() , f'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' )
with open(f'hub/hopper-medium-v2/unet/hor{hor}/config.json' , '''w''' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def _UpperCAmelCase ( ):
a_ : Tuple = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 1_28, 2_56),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 6_55_36,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
a_ : Tuple = torch.load('''/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch''' )
a_ : int = model
a_ : Tuple = UNetaDModel(**_lowerCAmelCase )
print(f'length of state dict: {len(state_dict.keys() )}' )
print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
a_ : List[str] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
a_ : Union[str, Any] = state_dict.pop(_lowerCAmelCase )
hf_value_function.load_state_dict(_lowerCAmelCase )
torch.save(hf_value_function.state_dict() , '''hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin''' )
with open('''hub/hopper-medium-v2/value_function/config.json''' , '''w''' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 716 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
torch.manual_seed(0 )
a_ : Tuple = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
torch.manual_seed(0 )
a_ : Any = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
a_ : List[Any] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
a_ : List[Any] = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a_ : Union[str, Any] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
a_ : Any = DDPMScheduler()
a_ : str = AudioDiffusionPipeline(vqvae=__SCREAMING_SNAKE_CASE , unet=self.dummy_unet , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 )
a_ : List[Any] = output.audios[0]
a_ : Dict = output.images[0]
a_ : Dict = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : Optional[Any] = pipe(generator=__SCREAMING_SNAKE_CASE , steps=4 , return_dict=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : str = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
a_ : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
a_ : int = DDIMScheduler()
a_ : Dict = self.dummy_vqvae_and_unet
a_ : List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : Any = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : List[str] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
a_ : int = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : int = pipe(raw_audio=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , start_step=5 , steps=10 )
a_ : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
a_ : Optional[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
a_ : List[str] = self.dummy_unet_condition
a_ : Dict = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__SCREAMING_SNAKE_CASE , mel=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
a_ : int = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
np.random.seed(0 )
a_ : Any = torch.rand((1, 1, 10) )
a_ : Tuple = pipe(generator=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.images[0]
a_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : List[str] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
a_ : Any = torch_device
a_ : Optional[int] = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
a_ : Dict = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(42 )
a_ : List[str] = pipe(generator=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = output.audios[0]
a_ : Tuple = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
a_ : str = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
a_ : Tuple = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
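# Worked example (illustrative, not part of the test file): the audio-length assertions
# above follow from STFT framing. With the dummy UNet's spectrogram width of 64 frames
# and Mel's hop_length (512 samples by default in diffusers; that default is an
# assumption here), the reconstructed audio spans (64 - 1) * 512 samples.
_sample_width = 64
_hop_length = 512  # assumed diffusers Mel default
assert (_sample_width - 1) * _hop_length == 32_256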
| 666 | 0 |
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop" ) -> DataFrame:
    url = f'https://www.amazon.in/laptop/s?k={product}'
    header = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
        '''Accept-Language''': '''en-US, en;q=0.5''',
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            '''Product Title''',
            '''Product Link''',
            '''Current Price of the product''',
            '''Product Rating''',
            '''MRP of the product''',
            '''Discount''',
        ] )
    # Loop through each entry and store it in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            '''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
        try:
            product_title = item.h2.text
            product_link = '''https://www.amazon.in/''' + item.h2.a['''href''']
            product_price = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
            try:
                product_rating = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
            except AttributeError:
                product_rating = '''Not available'''
            try:
                product_mrp = (
                    '''₹'''
                    + item.find(
                        '''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
                )
            except AttributeError:
                product_mrp = ''''''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
                            - float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
                        )
                        / float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
                    )
                    * 100 )
            except ValueError:
                discount = float('''nan''' )
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[data_frame['''Current Price of the product'''] == '''''', '''Current Price of the product'''] = ''' '''
        data_frame.loc[data_frame['''MRP of the product'''] == '''''', '''MRP of the product'''] = ''' '''
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    product = '''headphones'''
    get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
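# Worked example (illustrative) of the discount formula used in the scraper:
# discount% = (MRP - price) / MRP * 100, so an MRP of ₹1,000 with a current
# price of ₹750 yields a 25.0% discount.
_mrp, _price = 1000.0, 750.0
assert (_mrp - _price) / _mrp * 100 == 25.0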
| 717 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def get_deta_config(model_name ):
a_ : Tuple = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
a_ : List[Any] = DetaConfig(
backbone_config=__A , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=__A , with_box_refine=__A , two_stage=__A , )
# set labels
a_ : Optional[Any] = '''huggingface/label-files'''
if "o365" in model_name:
a_ : Optional[Any] = 366
a_ : Tuple = '''object365-id2label.json'''
else:
a_ : Any = 91
a_ : Union[str, Any] = '''coco-detection-id2label.json'''
a_ : Tuple = num_labels
a_ : str = json.load(open(cached_download(hf_hub_url(__A , __A , repo_type='''dataset''' ) ) , '''r''' ) )
a_ : Optional[int] = {int(__A ): v for k, v in idalabel.items()}
a_ : int = idalabel
a_ : Dict = {v: k for k, v in idalabel.items()}
return config
def create_rename_keys(config ):
a_ : Tuple = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v(state_dict , backbone_config ):
a_ : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
a_ : Tuple = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
a_ : List[str] = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
a_ : str = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
a_ : Optional[Any] = in_proj_weight[:dim, :]
a_ : List[Any] = in_proj_bias[: dim]
a_ : Optional[Any] = in_proj_weight[
dim : dim * 2, :
]
a_ : Union[str, Any] = in_proj_bias[
dim : dim * 2
]
a_ : Optional[int] = in_proj_weight[
-dim :, :
]
a_ : int = in_proj_bias[-dim :]
# fmt: on
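# Minimal sketch (not from the conversion script) of the q/k/v split performed above:
# Swin-style checkpoints store one fused (3 * dim, dim) qkv projection, while the
# target model keeps separate query/key/value matrices, so the fused tensor is sliced
# into three contiguous row blocks of `dim` rows each. Shapes below are illustrative.
import torch
_dim = 4
_fused = torch.randn(3 * _dim , _dim )
_q, _k, _v = _fused[:_dim, :], _fused[_dim : _dim * 2, :], _fused[-_dim:, :]
assert torch.equal(torch.cat([_q, _k, _v] , dim=0 ) , _fused )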
def read_in_decoder_q_k_v(state_dict , config ):
# transformer decoder self-attention layers
a_ : Any = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
a_ : int = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
a_ : Any = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
a_ : Dict = in_proj_weight[:hidden_size, :]
a_ : Tuple = in_proj_bias[:hidden_size]
a_ : Any = in_proj_weight[
hidden_size : hidden_size * 2, :
]
a_ : Tuple = in_proj_bias[hidden_size : hidden_size * 2]
a_ : Optional[int] = in_proj_weight[-hidden_size:, :]
a_ : int = in_proj_bias[-hidden_size:]
def prepare_img():
a_ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a_ : List[str] = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def convert_deta_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub ):
a_ : Union[str, Any] = get_deta_config(__A )
# load original state dict
if model_name == "deta-swin-large":
a_ : Optional[Any] = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
elif model_name == "deta-swin-large-o365":
a_ : List[str] = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
else:
raise ValueError(f'Model name {model_name} not supported' )
a_ : List[Any] = torch.load(__A , map_location='''cpu''' )['''model''']
# original state dict
for name, param in state_dict.items():
print(__A , param.shape )
# rename keys
a_ : Union[str, Any] = create_rename_keys(__A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_swin_q_k_v(__A , config.backbone_config )
read_in_decoder_q_k_v(__A , __A )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
a_ : Optional[Any] = state_dict.pop(__A )
a_ : int = val
if "input_proj" in key:
a_ : str = state_dict.pop(__A )
a_ : Optional[Any] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
a_ : List[str] = state_dict.pop(__A )
a_ : List[Any] = val
# finally, create HuggingFace model and load state dict
a_ : Dict = DetaForObjectDetection(__A )
model.load_state_dict(__A )
model.eval()
a_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
model.to(__A )
# load image processor
a_ : List[Any] = DetaImageProcessor(format='''coco_detection''' )
# verify our conversion on image
a_ : Dict = prepare_img()
a_ : Optional[int] = processor(images=__A , return_tensors='''pt''' )
a_ : Any = encoding['''pixel_values''']
a_ : int = model(pixel_values.to(__A ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
a_ : Optional[int] = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
a_ : Tuple = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
a_ : Union[str, Any] = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
a_ : Any = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__A ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__A ) , atol=1E-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(__A ).mkdir(exist_ok=__A )
model.save_pretrained(__A )
processor.save_pretrained(__A )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 666 | 0 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(num: int ) -> bool:
    digits = str(num )
    return len(digits ) == 9 and set(digits ) == set('''123456789''' )
def solution() -> int | None:
    for base_num in range(9_999 , 4_999 , -1 ):
        candidate = 100_002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(333 , 99 , -1 ):
        candidate = 1_002_003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
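# Worked example: for base_num = 9_327 the first loop forms
# 100_002 * 9_327 = 932_718_654, i.e. the concatenation of 9_327 with
# 2 * 9_327 = 18_654, and that number uses each digit 1-9 exactly once.
assert 100_002 * 9_327 == 932_718_654
assert is_9_pandigital(100_002 * 9_327 )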
if __name__ == "__main__":
print(F"""{solution() = }""")
| 718 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = DDIMPipeline
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
torch.manual_seed(0 )
a_ : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
a_ : str = DDIMScheduler()
a_ : Union[str, Any] = {'''unet''': unet, '''scheduler''': scheduler}
return components
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple=0 ) -> str:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Dict = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : Union[str, Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Dict = '''cpu'''
a_ : List[Any] = self.get_dummy_components()
a_ : List[str] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = pipe(**__SCREAMING_SNAKE_CASE ).images
a_ : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
a_ : int = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
a_ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
a_ : Optional[Any] = '''google/ddpm-cifar10-32'''
a_ : Optional[Any] = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Dict = DDIMScheduler()
a_ : List[str] = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddim.to(__SCREAMING_SNAKE_CASE )
ddim.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : Tuple = ddim(generator=__SCREAMING_SNAKE_CASE , eta=0.0 , output_type='''numpy''' ).images
a_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ : List[str] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : int = '''google/ddpm-ema-bedroom-256'''
a_ : str = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Tuple = DDIMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = DDIMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
ddpm.to(__SCREAMING_SNAKE_CASE )
ddpm.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.manual_seed(0 )
a_ : List[Any] = ddpm(generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
a_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
a_ : Optional[Any] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
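# Illustrative sketch (not part of the test file) of the fixed-seed pattern these tests
# rely on: seeding the generator makes sampling deterministic, so a small output slice
# can be compared against hard-coded reference values with a tight tolerance.
import numpy as np
_first = np.random.default_rng(0 ).standard_normal(4 )
_second = np.random.default_rng(0 ).standard_normal(4 )
assert np.abs(_first - _second ).max() == 0.0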
| 666 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config , input_ids , attention_mask=None , head_mask=None ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class SCREAMING_SNAKE_CASE :
snake_case__ = OPTConfig
snake_case__ = {}
snake_case__ = "gelu"
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any]=13 , __SCREAMING_SNAKE_CASE : List[Any]=7 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Any=99 , __SCREAMING_SNAKE_CASE : Optional[Any]=16 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : Any=4 , __SCREAMING_SNAKE_CASE : Any=4 , __SCREAMING_SNAKE_CASE : Dict="gelu" , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Any=20 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : str=1 , __SCREAMING_SNAKE_CASE : List[Any]=0 , __SCREAMING_SNAKE_CASE : Any=16 , __SCREAMING_SNAKE_CASE : Any=16 , ) -> Optional[int]:
a_ : Union[str, Any] = parent
a_ : Dict = batch_size
a_ : str = seq_length
a_ : Dict = is_training
a_ : Dict = use_labels
a_ : List[Any] = vocab_size
a_ : Dict = hidden_size
a_ : int = num_hidden_layers
a_ : str = num_attention_heads
a_ : Dict = intermediate_size
a_ : Tuple = hidden_act
a_ : str = hidden_dropout_prob
a_ : str = attention_probs_dropout_prob
a_ : str = max_position_embeddings
a_ : Dict = eos_token_id
a_ : int = pad_token_id
a_ : str = bos_token_id
a_ : Optional[Any] = embed_dim
a_ : List[str] = word_embed_proj_dim
a_ : Any = False
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
a_ : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
a_ : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
a_ : Any = tf.concat([input_ids, eos_tensor] , axis=1 )
a_ : int = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowerCAmelCase_ , **self.config_updates , )
a_ : int = prepare_opt_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict:
a_ : int = TFOPTModel(config=lowerCAmelCase_ )
a_ : Dict = inputs_dict['''input_ids''']
a_ : Dict = input_ids[:1, :]
a_ : Optional[int] = inputs_dict['''attention_mask'''][:1, :]
a_ : Dict = 1
# first forward pass
a_ : List[str] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
a_ , a_ : Optional[int] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
a_ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
a_ : int = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention mask
a_ : Union[str, Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
a_ : str = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
a_ : List[str] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
a_ : Dict = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
a_ : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
a_ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
a_ : int = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1e-3 )
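# Minimal numpy sketch (illustrative, not from the test) of the cache equivalence the
# method above checks: with causal attention, scoring a new query against cached keys
# and values concatenated with the new ones reproduces the full-sequence output.
import numpy as np
def _softmax(x ):
    e = np.exp(x - x.max(axis=-1 , keepdims=True ) )
    return e / e.sum(axis=-1 , keepdims=True )
_rng = np.random.default_rng(0 )
_d = 8
_q = _rng.standard_normal((5, _d) )
_k = _rng.standard_normal((5, _d) )
_v = _rng.standard_normal((5, _d) )
# (a) full pass: the last query attends over all five positions
_full = _softmax(_q[-1:] @ _k.T / np.sqrt(_d ) ) @ _v
# (b) cached pass: keys/values for the first four positions come from the cache
_k_inc = np.concatenate([_k[:4], _k[4:]] )
_v_inc = np.concatenate([_v[:4], _v[4:]] )
_inc = _softmax(_q[-1:] @ _k_inc.T / np.sqrt(_d ) ) @ _v_inc
assert np.allclose(_full , _inc )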
@require_tf
class SCREAMING_SNAKE_CASE ( __a , __a , unittest.TestCase ):
snake_case__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
snake_case__ = (TFOPTForCausalLM,) if is_tf_available() else ()
snake_case__ = (
{"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = 10
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
a_ : List[str] = TFOPTModelTester(self )
a_ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
a_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
a_ , a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str ):
if hasattr(lowerCAmelCase_ , '''weight''' ):
return embedding_layer.weight
else:
# Build the word embedding weights if they do not exist yet,
# then retry fetching the attribute once they are built.
model.build()
if hasattr(lowerCAmelCase_ , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
a_ : Optional[int] = model_class(config=lowerCAmelCase_ )
a_ : List[str] = _get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
a_ : Dict = _get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowerCAmelCase_ )
a_ : List[Any] = _get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
a_ : List[Any] = _get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
a_ : int = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowerCAmelCase_ )
# check that weights remain the same after resizing
a_ : str = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
a_ : int = False
self.assertTrue(lowerCAmelCase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowerCAmelCase_ )
a_ : Optional[Any] = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
a_ : int = False
self.assertTrue(lowerCAmelCase_ )
def _long_tensor(tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case__ = 99
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
a_ : Union[str, Any] = tf.ones((4, 1) , dtype=tf.intaa ) * 2
a_ : List[Any] = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
a_ : Optional[Any] = input_ids.shape[0]
a_ : Any = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
a_ : Any = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
a_ : Union[str, Any] = _long_tensor([[0, 31_414, 232, 328, 740, 1140, 12_695, 69, 46_078, 1588, 2]] )
a_ : int = tf.not_equal(lowerCAmelCase_ , model.config.pad_token_id )
with tf.GradientTape():
a_ : List[str] = model(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).last_hidden_state
a_ : Optional[int] = (1, 11, 512)
self.assertEqual(output.shape , lowerCAmelCase_ )
a_ : List[Any] = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-3 ) )
a_ : List[str] = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
a_ : Union[str, Any] = xla_generate(lowerCAmelCase_ , lowerCAmelCase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-2 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
super().setUp()
a_ : str = '''facebook/opt-350m'''
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : Optional[int] = TFOPTForCausalLM.from_pretrained(self.path_model )
a_ : List[str] = GPTaTokenizer.from_pretrained(self.path_model )
a_ : Union[str, Any] = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
a_ : str = tokenizer(lowerCAmelCase_ , return_tensors='''tf''' , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
a_ : Any = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
a_ : Optional[int] = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
a_ : str = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
a_ : Any = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
a_ : Dict = '''facebook/opt-125m'''
a_ : Tuple = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
a_ : Any = []
a_ : List[str] = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
a_ : List[str] = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
a_ : List[str] = tokenizer(lowerCAmelCase_ , return_tensors='''tf''' ).input_ids
a_ : str = model.generate(lowerCAmelCase_ , max_length=10 )
a_ : List[Any] = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
a_ : Dict = '''facebook/opt-350m'''
a_ : Tuple = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
a_ : Tuple = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
a_ : List[Any] = '''left'''
# use different length sentences to test batching
a_ : int = [
'''Hello, my dog is a little''',
'''Today, I''',
]
a_ : Union[str, Any] = tokenizer(lowerCAmelCase_ , return_tensors='''tf''' , padding=lowerCAmelCase_ )
a_ : Tuple = inputs['''input_ids''']
a_ : Any = model.generate(input_ids=lowerCAmelCase_ , attention_mask=inputs['''attention_mask'''] )
a_ : List[str] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
a_ : List[str] = model.generate(input_ids=lowerCAmelCase_ )
a_ : List[str] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
a_ : Union[str, Any] = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
a_ : Any = model.generate(input_ids=lowerCAmelCase_ , max_length=model.config.max_length - num_paddings )
a_ : int = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
a_ : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase_ )
a_ : Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase_ )
a_ : Optional[int] = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , [non_padded_sentence, padded_sentence] )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
a_ : int = '''facebook/opt-350m'''
a_ : Dict = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
a_ : Optional[int] = []
a_ : List[Any] = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
a_ : int = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
a_ : int = tokenizer(lowerCAmelCase_ , return_tensors='''tf''' ).input_ids
a_ : List[str] = model.generate(lowerCAmelCase_ , max_length=10 )
a_ : str = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 719 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = 42
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="Translation" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def __call__( self : Dict ) -> Tuple:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = None
snake_case__ = None
snake_case__ = None
# Automatically constructed
snake_case__ = "dict"
snake_case__ = None
snake_case__ = field(default="TranslationVariableLanguages" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : List[str] = sorted(set(self.languages ) ) if self.languages else None
a_ : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self : Any ) -> Optional[Any]:
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]:
a_ : str = set(self.languages )
if self.languages and set(__SCREAMING_SNAKE_CASE ) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(__SCREAMING_SNAKE_CASE ) - lang_set ) )}) are not in valid set ({", ".join(__SCREAMING_SNAKE_CASE )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
a_ : int = []
for lang, text in translation_dict.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
a_ , a_ : List[Any] = zip(*sorted(__SCREAMING_SNAKE_CASE ) )
return {"language": languages, "translation": translations}
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
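# Standalone sketch (illustrative) of the flattening performed above: a dict mapping
# languages to one-or-many translations is split into (language, text) tuples and
# sorted by language code before the two columns are unzipped.
_translation_dict = {'''en''': '''the cat''', '''fr''': ['''le chat''', '''la chatte''']}
_pairs = []
for _lang, _text in _translation_dict.items():
    _pairs.extend([(_lang, _t) for _t in ([_text] if isinstance(_text , str ) else _text )] )
_languages, _translations = zip(*sorted(_pairs ) )
assert _languages == ('''en''', '''fr''', '''fr''')
assert _translations == ('''the cat''', '''la chatte''', '''le chat''')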
| 666 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( __lowercase , unittest.TestCase ):
snake_case__ = OpenAIGPTTokenizer
snake_case__ = OpenAIGPTTokenizerFast
snake_case__ = True
snake_case__ = False
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
a_ : Optional[Any] = dict(zip(__a , range(len(__a ) ) ) )
a_ : Optional[int] = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
a_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(__a ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(__a ) )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : Dict ) -> str:
return "lower newer", "lower newer"
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
a_ : List[Any] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
a_ : str = """lower"""
a_ : str = ["""low""", """er</w>"""]
a_ : List[Any] = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
a_ : Union[str, Any] = tokens + ["""<unk>"""]
a_ : Tuple = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int]=15 ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : int = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
a_ : List[Any] = """This is a simple input"""
a_ : Dict = ["""This is a simple input 1""", """This is a simple input 2"""]
a_ : Any = ("""This is a simple input""", """This is a pair""")
a_ : Any = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='''max_length''' )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='''max_length''' )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='''max_length''' , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='''max_length''' )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='''max_length''' )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='''max_length''' , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class SCREAMING_SNAKE_CASE ( __lowercase ):
pass
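# Minimal BPE sketch (illustrative, not the real tokenizer) showing why the test above
# expects "lower" to tokenize as ["low", "er</w>"]: merges are applied greedily in rank
# order, so l+o -> lo, then lo+w -> low, then e+r</w> -> er</w>.
def _toy_bpe(symbols , ranks ):
    symbols = list(symbols )
    while True:
        candidates = [(ranks[(a, b)], i) for i, (a, b) in enumerate(zip(symbols , symbols[1:] ) ) if (a, b) in ranks]
        if not candidates:
            return symbols
        _, i = min(candidates )
        symbols = symbols[:i] + [symbols[i] + symbols[i + 1]] + symbols[i + 2 :]
_ranks = {('''l''', '''o'''): 0, ('''lo''', '''w'''): 1, ('''e''', '''r</w>'''): 2}
assert _toy_bpe(['''l''', '''o''', '''w''', '''e''', '''r</w>'''] , _ranks ) == ['''low''', '''er</w>''']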
| 720 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
a_ : Union[str, Any] = tempfile.mkdtemp()
a_ : Union[str, Any] = 8
# DPR tok
a_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a_ : str = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
a_ : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : int = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Optional[int] = {'''unk_token''': '''<unk>'''}
a_ : List[str] = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
a_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : int = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : str ) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ : List[str] = self.get_dummy_dataset()
a_ : Tuple = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
a_ : Tuple = dataset
a_ : Any = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : bool ) -> Dict:
a_ : Dict = self.get_dummy_dataset()
a_ : Dict = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
a_ : Optional[int] = os.path.join(self.tmpdirname , '''dataset''' )
a_ : str = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
a_ : Optional[Any] = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE ) , )
return retriever
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
a_ : str = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
a_ : Optional[int] = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
a_ : Union[str, Any] = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
a_ : Dict = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__SCREAMING_SNAKE_CASE , open(__SCREAMING_SNAKE_CASE , '''wb''' ) )
a_ : Optional[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
a_ : int = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : Optional[Any] = 1
a_ : Dict = self.get_dummy_canonical_hf_index_retriever()
a_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
a_ , a_ , a_ : str = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ : str = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ : List[str] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Union[str, Any] = 1
a_ : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : List[Any] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
a_ : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : List[str] = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
a_ : Union[str, Any] = 1
a_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ , a_ , a_ : Tuple = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Tuple = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ : Dict = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
a_ : str = 1
a_ : Tuple = self.get_dummy_legacy_index_retriever()
a_ : Union[str, Any] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ , a_ , a_ : Any = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
a_ : List[str] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__SCREAMING_SNAKE_CASE )
a_ : Any = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : List[str] = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ : Optional[Any] = retriever.retrieve(__SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
import torch
a_ : Any = 1
a_ : List[Any] = self.get_dummy_canonical_hf_index_retriever()
a_ : Union[str, Any] = [[5, 7], [10, 11]]
a_ : Optional[int] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
a_ : str = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
a_ , a_ , a_ : List[str] = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
a_ : Any = retriever(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
a_ , a_ , a_ , a_ : str = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : str = self.get_dpr_ctx_encoder_tokenizer()
a_ : Tuple = 1
a_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=__SCREAMING_SNAKE_CASE )
retriever.set_ctx_encoder_tokenizer(__SCREAMING_SNAKE_CASE )
a_ : Dict = [[5, 7], [10, 11]]
a_ : List[str] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=__SCREAMING_SNAKE_CASE )
self.assertEqual(
            len(out ) , 6 ) # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __SCREAMING_SNAKE_CASE ) # check for doc token related keys in dictionary.
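# --- Illustrative aside (not from the original test file) ---
# A minimal, self-contained sketch of why every retrieve() assertion above
# expects doc_ids [[1], [0]]: retrieval is maximum inner product search over
# the two dummy embeddings (all ones vs. 2 * all ones). All names below are
# hypothetical and exist only for this demonstration.
import numpy as np

vector_size = 8
doc_embeds = np.stack([np.ones(vector_size), 2 * np.ones(vector_size)])  # docs "0" and "1"
queries = np.stack([np.ones(vector_size), -np.ones(vector_size)])  # the tests' hidden states

scores = queries @ doc_embeds.T  # pairwise inner products, shape (2, 2)
print(scores.argmax(axis=1))  # [1 0]: the +1 query prefers doc 1, the -1 query prefers doc 0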
| 666 | 0 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs , ks ):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + '''$''' ) for x in qs) )
    # Try to match the regexes against a window of strings in ks
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules(rules ):
    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val
    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('''mp''' , None )),
        (("transformer", "wte", "embedding"), P('''mp''' , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , '''mp''' )),
        (("attention", "out_proj", "kernel"), P('''mp''' , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , '''mp''' )),
        (("mlp", "c_fc", "bias"), P('''mp''' )),
        (("mlp", "c_proj", "kernel"), P('''mp''' , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict ):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
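# --- Illustrative aside (not part of the original module) ---
# A hedged usage sketch of the window matcher above: a rule's regex tuple
# matches if it lines up with any contiguous window of a parameter path.
# The parameter paths below are made up for illustration.
assert _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
assert not _match(("attention", "out_proj", "kernel"), ("transformer", "wte", "embedding"))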
| 721 |
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float ):
    if side_length < 0:
        raise ValueError('''surface_area_cube() only accepts non-negative values''' )
    return 6 * side_length**2
def surface_area_cuboid(length: float , breadth: float , height: float ):
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float ):
    if radius < 0:
        raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
    return 4 * pi * radius**2
def surface_area_hemisphere(radius: float ):
    if radius < 0:
        raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
    return 3 * pi * radius**2
def surface_area_cone(radius: float , height: float ):
    if radius < 0 or height < 0:
        raise ValueError('''surface_area_cone() only accepts non-negative values''' )
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float , radius_2: float , height: float ):
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            '''surface_area_conical_frustum() only accepts non-negative values''' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float , height: float ):
    if radius < 0 or height < 0:
        raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float , tube_radius: float ):
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('''surface_area_torus() only accepts non-negative values''' )
    if torus_radius < tube_radius:
        raise ValueError(
            '''surface_area_torus() does not support spindle or self intersecting tori''' )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def area_rectangle(length: float , width: float ):
    if length < 0 or width < 0:
        raise ValueError('''area_rectangle() only accepts non-negative values''' )
    return length * width
def area_square(side_length: float ):
    if side_length < 0:
        raise ValueError('''area_square() only accepts non-negative values''' )
    return side_length**2
def area_triangle(base: float , height: float ):
    if base < 0 or height < 0:
        raise ValueError('''area_triangle() only accepts non-negative values''' )
    return (base * height) / 2
def area_triangle_three_sides(side1: float , side2: float , side3: float ):
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('''Given three sides do not form a triangle''' )
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3) )
    return area
def area_parallelogram(base: float , height: float ):
    if base < 0 or height < 0:
        raise ValueError('''area_parallelogram() only accepts non-negative values''' )
    return base * height
def area_trapezium(base_1: float , base_2: float , height: float ):
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('''area_trapezium() only accepts non-negative values''' )
    return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius: float ):
    if radius < 0:
        raise ValueError('''area_circle() only accepts non-negative values''' )
    return pi * radius**2
def area_ellipse(radius_x: float , radius_y: float ):
    if radius_x < 0 or radius_y < 0:
        raise ValueError('''area_ellipse() only accepts non-negative values''' )
    return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float , diagonal_2: float ):
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('''area_rhombus() only accepts non-negative values''' )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int , length: float ):
    if not isinstance(sides , int ) or sides < 3:
        raise ValueError(
            '''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
    elif length < 0:
        raise ValueError(
            '''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 666 | 0 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE :
@staticmethod
def SCREAMING_SNAKE_CASE ( *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Any ) -> int:
pass
def hashimage(image: Image ):
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image ):
    '''simple docstring'''
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
snake_case__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
a_ : Dict = MaskGenerationPipeline(model=A__ , image_processor=A__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any:
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
        image_segmenter = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
        outputs = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
# Shortening by hashing
        new_outupt = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(A__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_outupt , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
        model_id = '''facebook/sam-vit-huge'''
        image_segmenter = pipeline('''mask-generation''' , model=model_id )
        outputs = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
        new_outupt = []
for i, o in enumerate(outputs['''masks'''] ):
            new_outupt += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_outupt , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
] , )
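# --- Illustrative aside (not part of the original test file) ---
# A minimal sketch of the hashing trick used in the expectations above: a
# large binary mask is reduced to a short, stable md5 fingerprint so the
# hard-coded test values stay compact. The mask below is hypothetical.
import hashlib
import numpy as np
from PIL import Image

mask = Image.fromarray(np.zeros((480, 640), dtype=np.uint8))
print(hashlib.md5(mask.tobytes()).hexdigest()[:10])  # 10-char fingerprint, as in hashimage()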
| 700 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = IFInpaintingSuperResolutionPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
snake_case__ = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[Any]:
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
a_ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a_ : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
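# --- Illustrative aside (not part of the original test file) ---
# A hedged sketch of the device-aware seeding pattern in get_dummy_inputs
# above: MPS lacks per-device torch.Generator support, so tests fall back to
# the global manual_seed there. make_generator is a hypothetical helper.
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # global default generator
    return torch.Generator(device=device).manual_seed(seed)

print(torch.rand(2, generator=make_generator("cpu")))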
| 666 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowerCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "The column name of the images in the files."} )
snake_case__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "A folder containing the training data."} )
snake_case__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "A folder containing the validation data."} )
snake_case__ = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
a_ : Optional[Any] = {}
if self.train_dir is not None:
a_ : Dict = self.train_dir
if self.validation_dir is not None:
a_ : Optional[int] = self.validation_dir
a_ : Any = data_files if data_files else None
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch."
)
} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
snake_case__ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
snake_case__ = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Name or path of preprocessor config."} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
snake_case__ = field(
default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
snake_case__ = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = field(
default=1e-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def _UpperCAmelCase ( __A : List[Any] ):
a_ : List[str] = torch.stack([example['''pixel_values'''] for example in examples] )
return {"pixel_values": pixel_values}
def _UpperCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a_ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a_ , a_ , a_ : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a_ , a_ , a_ : int = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mae''' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
a_ : int = training_args.get_process_log_level()
logger.setLevel(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
a_ : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a_ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
a_ : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
a_ : Dict = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0:
a_ : Dict = ds['''train'''].train_test_split(data_args.train_val_split )
a_ : Any = split['''train''']
a_ : str = split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a_ : Any = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
a_ : int = ViTMAEConfig.from_pretrained(model_args.config_name , **_SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
a_ : List[str] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE )
else:
a_ : Union[str, Any] = ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(f'New config: {config}' )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
a_ : List[str] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
a_ : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE )
else:
a_ : Optional[Any] = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
a_ : List[str] = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
a_ : List[Any] = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
if training_args.do_train:
a_ : Union[str, Any] = ds['''train'''].column_names
else:
a_ : Optional[int] = ds['''validation'''].column_names
if data_args.image_column_name is not None:
a_ : Optional[int] = data_args.image_column_name
elif "image" in column_names:
a_ : int = '''image'''
elif "img" in column_names:
a_ : Any = '''img'''
else:
a_ : Optional[Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
a_ : List[str] = image_processor.size['''shortest_edge''']
else:
a_ : Union[str, Any] = (image_processor.size['''height'''], image_processor.size['''width'''])
a_ : Optional[Any] = Compose(
[
            Lambda(lambda img : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_SCREAMING_SNAKE_CASE , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
    def preprocess_images(examples ):
        examples['''pixel_values'''] = [transforms(image ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
a_ : Dict = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
a_ : List[Any] = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_SCREAMING_SNAKE_CASE )
# Compute absolute learning rate
a_ : List[Any] = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
a_ : List[Any] = training_args.base_learning_rate * total_train_batch_size / 2_56
# Initialize our trainer
a_ : Optional[Any] = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
a_ : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
a_ : Tuple = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a_ : Optional[int] = last_checkpoint
a_ : List[Any] = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
a_ : Optional[int] = trainer.evaluate()
trainer.log_metrics('''eval''' , _SCREAMING_SNAKE_CASE )
trainer.save_metrics('''eval''' , _SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
a_ : Dict = {
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( __A : List[Any] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
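# --- Illustrative aside (not part of the original script) ---
# A small sketch of the absolute learning-rate rule applied above, i.e. the
# linear scaling heuristic absolute_lr = base_lr * total_batch_size / 256.
# absolute_lr() is a hypothetical helper for demonstration only.
def absolute_lr(base_lr: float, per_device_bs: int, grad_accum: int, world_size: int) -> float:
    total_batch_size = per_device_bs * grad_accum * world_size
    return base_lr * total_batch_size / 256

print(absolute_lr(1e-3, 64, 2, 8))  # 0.004 for an effective batch size of 1024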
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
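# --- Illustrative aside (simplified; not transformers' actual _LazyModule) ---
# A hedged sketch of the lazy-import idea behind the structure above: module
# attributes resolve to real submodule imports only on first access.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [attrs]} into {attr: submodule}
        self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}

    def __getattr__(self, attr: str):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)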
| 666 | 0 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
__lowerCAmelCase = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__lowerCAmelCase = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__lowerCAmelCase = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def SCREAMING_SNAKE_CASE ( self : int , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[int]=False ) -> List[str]:
if concatenate_texts:
return compute_measures(__UpperCamelCase , __UpperCamelCase )["wer"]
else:
a_ : Optional[int] = 0
a_ : str = 0
for prediction, reference in zip(__UpperCamelCase , __UpperCamelCase ):
a_ : Any = compute_measures(__UpperCamelCase , __UpperCamelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
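# --- Illustrative aside (not part of the metric module) ---
# A self-contained sketch of the WER formula from the docstring, computed with
# a plain word-level Levenshtein table instead of jiwer: WER = (S + D + I) / N.
# word_error_rate() is a hypothetical helper for demonstration only.
def word_error_rate(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i  # all deletions
    for j in range(len(hyp) + 1):
        d[0][j] = j  # all insertions
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            sub = d[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1])
            d[i][j] = min(sub, d[i - 1][j] + 1, d[i][j - 1] + 1)
    return d[-1][-1] / len(ref)

print(word_error_rate("this is the reference", "this is the prediction"))  # 0.25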
| 702 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCAmelCase ( __A : List[str] , __A : List[Any] ):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
        for row_idx, row in enumerate(partition ):
expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : Union[str, Any] = spark.range(1_00 ).repartition(1 )
a_ : Any = Spark(__A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : int = spark.range(10 ).repartition(2 )
a_ : Tuple = [1, 0]
a_ : List[str] = _generate_iterable_examples(__A , __A ) # Reverse the partitions.
a_ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , __A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a_ , a_ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(10 ).repartition(1 )
a_ : Tuple = SparkExamplesIterable(__A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__A ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
        generator_mock.shuffle.side_effect = lambda x : x.reverse()
a_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [2, 1, 0] )
a_ : str = SparkExamplesIterable(__A ).shuffle_data_sources(__A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[str] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a_ : Dict = SparkExamplesIterable(__A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [0, 2] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a_ : List[Any] = SparkExamplesIterable(__A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__A , [1, 3] )
for i, (row_id, row_dict) in enumerate(__A ):
a_ , a_ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCAmelCase ( ):
a_ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
a_ : List[Any] = spark.range(1_00 ).repartition(1 )
a_ : Optional[Any] = Spark(__A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
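# --- Illustrative aside (not part of the original test file) ---
# A hedged sketch of the sharding contract the assertions above imply: worker
# k of n takes every n-th partition round-robin, so with 4 partitions worker 0
# of 2 gets [0, 2] and worker 1 gets [1, 3]. shard_partitions is hypothetical.
def shard_partitions(num_partitions: int, worker_id: int, num_workers: int) -> list:
    return list(range(worker_id, num_partitions, num_workers))

print(shard_partitions(4, 0, 2))  # [0, 2]
print(shard_partitions(4, 1, 2))  # [1, 3]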
| 666 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
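# --- Illustrative aside (simplified; not the actual transformers helpers) ---
# A hedged sketch of the optional-dependency gate used above: probe for a
# backend once and expose gated symbols only when it is importable.
import importlib.util

def is_backend_available(package: str) -> bool:
    return importlib.util.find_spec(package) is not None

if not is_backend_available("tokenizers"):
    print("tokenizers missing: fast tokenizer classes would stay out of the import structure")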
| 703 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = "bloom"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=25_0880 , __SCREAMING_SNAKE_CASE : Dict=64 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : int=8 , __SCREAMING_SNAKE_CASE : Any=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : List[str]=False , **__SCREAMING_SNAKE_CASE : str , ) -> Any:
a_ : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
a_ : Any = kwargs.pop('''n_embed''' , __SCREAMING_SNAKE_CASE )
a_ : Optional[int] = hidden_size if n_embed is None else n_embed
a_ : int = n_layer
a_ : str = n_head
a_ : Optional[int] = layer_norm_epsilon
a_ : Dict = initializer_range
a_ : List[str] = use_cache
a_ : Dict = pretraining_tp
a_ : Optional[Any] = apply_residual_connection_post_layernorm
a_ : Optional[Any] = hidden_dropout
a_ : List[str] = attention_dropout
a_ : Dict = bos_token_id
a_ : Optional[int] = eos_token_id
a_ : Any = slow_but_exact
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = version.parse("1.12" )
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" , __SCREAMING_SNAKE_CASE : List[PatchingSpec] = None , __SCREAMING_SNAKE_CASE : bool = False , ) -> Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE )
        if not getattr(self._config , '''pad_token_id''' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' , inverted_values_shape=True )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self._config.n_head
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizer" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
        common_inputs = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
            __SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch , seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_key_shape ), torch.zeros(past_value_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 13
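# --- Illustrative aside (not part of the original config module) ---
# A small sketch of the BLOOM past_key_values layout built in
# generate_dummy_inputs above: keys are (batch * num_heads, head_dim, past_len)
# while values swap the last two dims, which is why the inputs property passes
# inverted_values_shape=True. The sizes below are illustrative.
batch, num_heads, hidden_size, past_len = 2, 8, 64, 5
head_dim = hidden_size // num_heads
past_key_shape = (batch * num_heads, head_dim, past_len)
past_value_shape = (batch * num_heads, past_len, head_dim)
print(past_key_shape, past_value_shape)  # (16, 8, 5) (16, 5, 8)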
| 666 | 0 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x ): # picklable for multiprocessing
    return x.sum()
def add_one(i ): # picklable for multiprocessing
    return i + 1
@dataclass
class SCREAMING_SNAKE_CASE :
snake_case__ = 42
snake_case__ = 42
class SCREAMING_SNAKE_CASE ( TestCase ):
    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {'''a''': 1, '''b''': 2}
        s6 = {'''a''': [1, 2], '''b''': [3, 4]}
        s7 = {'''a''': {'''1''': 1}, '''b''': 2}
        s8 = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {'''a''': 2, '''b''': 3}
        expected_map_nested_s6 = {'''a''': [2, 3], '''b''': [4, 5]}
        expected_map_nested_s7 = {'''a''': {'''1''': 2}, '''b''': 3}
        expected_map_nested_s8 = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
        self.assertEqual(map_nested(add_one , s1 ) , expected_map_nested_s1 )
        self.assertEqual(map_nested(add_one , s2 ) , expected_map_nested_s2 )
        self.assertEqual(map_nested(add_one , s3 ) , expected_map_nested_s3 )
        self.assertEqual(map_nested(add_one , s4 ) , expected_map_nested_s4 )
        self.assertEqual(map_nested(add_one , s5 ) , expected_map_nested_s5 )
        self.assertEqual(map_nested(add_one , s6 ) , expected_map_nested_s6 )
        self.assertEqual(map_nested(add_one , s7 ) , expected_map_nested_s7 )
        self.assertEqual(map_nested(add_one , s8 ) , expected_map_nested_s8 )
        num_proc = 2
        self.assertEqual(map_nested(add_one , s1 , num_proc=num_proc ) , expected_map_nested_s1 )
        self.assertEqual(map_nested(add_one , s2 , num_proc=num_proc ) , expected_map_nested_s2 )
        self.assertEqual(map_nested(add_one , s3 , num_proc=num_proc ) , expected_map_nested_s3 )
        self.assertEqual(map_nested(add_one , s4 , num_proc=num_proc ) , expected_map_nested_s4 )
        self.assertEqual(map_nested(add_one , s5 , num_proc=num_proc ) , expected_map_nested_s5 )
        self.assertEqual(map_nested(add_one , s6 , num_proc=num_proc ) , expected_map_nested_s6 )
        self.assertEqual(map_nested(add_one , s7 , num_proc=num_proc ) , expected_map_nested_s7 )
        self.assertEqual(map_nested(add_one , s8 , num_proc=num_proc ) , expected_map_nested_s8 )
        sn = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
        expected_map_nested_sn_sum = {'''a''': 2, '''b''': 0, '''c''': 2}
        expected_map_nested_sn_int = {
            '''a''': np.eye(2 ).astype(int ),
            '''b''': np.zeros(3 ).astype(int ),
            '''c''': np.ones(2 ).astype(int ),
        }
        self.assertEqual(map_nested(np_sum , sn , map_numpy=False ) , expected_map_nested_sn_sum )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum , sn , map_numpy=True ).items()} , {k: v.tolist() for k, v in expected_map_nested_sn_int.items()} , )
        self.assertEqual(map_nested(np_sum , sn , map_numpy=False , num_proc=num_proc ) , expected_map_nested_sn_sum )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum , sn , map_numpy=True , num_proc=num_proc ).items()} , {k: v.tolist() for k, v in expected_map_nested_sn_int.items()} , )
        with self.assertRaises(AttributeError ): # can't pickle a local lambda
            map_nested(lambda __A : __A + 1 , sn , num_proc=num_proc )
    def SCREAMING_SNAKE_CASE ( self : int ) -> int:
        d1 = {'''a''': 1, '''b''': 2}
        d2 = {'''a''': 3, '''b''': 4}
        d3 = {'''a''': 5, '''b''': 6}
        expected_zip_dict_result = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(d1 , d2 , d3 ) ) , expected_zip_dict_result )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
        class Foo :
            my_attr = '''bar'''
        foo = Foo()
        self.assertEqual(foo.my_attr , '''bar''' )
        with temporary_assignment(foo , '''my_attr''' , '''BAR''' ):
            self.assertEqual(foo.my_attr , '''BAR''' )
        self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def _UpperCAmelCase ( iterable_length : int , num_proc : int , expected_num_proc : int ):
    with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
        '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
        data_struct = {f'{i}': i for i in range(iterable_length )}
        _ = map_nested(lambda __A : __A + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class SCREAMING_SNAKE_CASE ( TestCase ):
    @require_tf
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2 )

        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()

        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    @require_torch
    def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()

        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
        def gen_random_output():
            return np.random.rand(1 , 3 )

        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize('''input_data''' , [{}] )
def _UpperCAmelCase ( input_data : str ):
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def _UpperCAmelCase ( data : Tuple , expected_output : List[Any] ):
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def _UpperCAmelCase ( ):
    input = A(x=1 , y='''foobar''' )
    expected_output = {'''x''': 1, '''y''': '''foobar'''}
    assert asdict(input ) == expected_output
    nested_input = {'''a''': {'''b''': A(x=10 , y='''foo''' )}, '''c''': [A(x=20 , y='''bar''' )]}
    expected_output = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
    assert asdict(nested_input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y='''foo''' )] )
def _split_text ( text : str ):
    return text.split()
def _aseconds_generator_of_aitems_with_timing ( content : Dict ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def _UpperCAmelCase ( ):
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
        assert out.count('''hello''' ) == 10
        assert out.count('''there''' ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
        assert out.count('''hello''' ) == 10
        assert out.count('''there''' ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it is yielded"
            out.append(content )
        assert out.count('''a''' ) == 2
        assert out.count('''b''' ) == 2
        assert len(out ) == 4
| 704 |
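# `map_nested` applies a function over every leaf of a nested structure of
# dicts and lists, optionally in worker processes. A minimal standalone sketch
# of the behaviour asserted above (`nested` is example data made up here):
from datasets.utils.py_utils import map_nested

def add_ten(i):  # module-level so it stays picklable for num_proc > 1
    return i + 10

nested = {"a": [1, 2], "b": {"1": 3}}
print(map_nested(add_ten, nested))               # {'a': [11, 12], 'b': {'1': 13}}
print(map_nested(add_ten, nested, num_proc=2))   # same result, parallelized when large enough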
'''simple docstring'''
import sys
__lowerCAmelCase = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval ( s : str ):
    product = 1
    for digit in s:
        product *= int(digit )
    return product
def solution ( n : str = __lowerCAmelCase ):
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product , str_eval(substr ) )
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 666 | 0 |
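# The solution above keeps a sliding 13-character window and skips ahead when a
# smaller digit enters the window. For reference, a simpler (quadratic) sketch
# of the same Project Euler problem, written only as an illustration:
def largest_window_product(digits: str, width: int = 13) -> int:
    best = 0
    for i in range(len(digits) - width + 1):
        product = 1
        for d in digits[i : i + width]:
            product *= int(d)
        best = max(best, product)
    return best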
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=1000 , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def SCREAMING_SNAKE_CASE ( self : Tuple , device : Tuple , seed : Any=0 ) -> List[Any]:
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''An astronaut riding an elephant''',
            '''source_prompt''': '''An astronaut riding a horse''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''eta''': 0.1,
            '''strength''': 0.8,
            '''guidance_scale''': 3,
            '''source_guidance_scale''': 1,
            '''output_type''': '''numpy''',
        }
return inputs
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
        device = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module , '''half''' ):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    @skip_mps
    def test_save_load_local( self : Optional[Any] ) -> List[Any]:
        return super().test_save_load_local()
    @unittest.skip('''non-deterministic pipeline''' )
    def test_inference_batch_single_identical( self : Optional[Any] ) -> Union[str, Any]:
        return super().test_inference_batch_single_identical()
    @skip_mps
    def test_dict_tuple_outputs_equivalent( self : Union[str, Any] ) -> str:
        return super().test_dict_tuple_outputs_equivalent()
    @skip_mps
    def test_save_load_optional_components( self : str ) -> int:
        return super().test_save_load_optional_components()
    @skip_mps
    def test_attention_slicing_forward_pass( self : Any ) -> Optional[int]:
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def tearDown( self : Union[str, Any] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
        init_image = init_image.resize((512, 512) )
        model_id = '''CompVis/stable-diffusion-v1-4'''
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder='''scheduler''' )
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , safety_checker=None , torch_dtype=torch.float16 , revision='''fp16''' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        source_prompt = '''A black colored car'''
        prompt = '''A blue colored car'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type='''np''' , )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5e-1
    def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
        init_image = init_image.resize((512, 512) )
        model_id = '''CompVis/stable-diffusion-v1-4'''
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder='''scheduler''' )
        pipe = CycleDiffusionPipeline.from_pretrained(model_id , scheduler=scheduler , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        source_prompt = '''A black colored car'''
        prompt = '''A blue colored car'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type='''np''' , )
        image = output.images
        assert np.abs(image - expected_image ).max() < 2e-2
| 705 |
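# Distilled from the slow tests above, a minimal end-to-end sketch of running
# CycleDiffusion (requires a GPU and downloads the v1-4 weights; the model id,
# image URL, prompts, and hyper-parameters are all taken from the tests):
import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

model_id = "CompVis/stable-diffusion-v1-4"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/cycle-diffusion/black_colored_car.png"
).resize((512, 512))

output = pipe(
    prompt="A blue colored car",
    source_prompt="A black colored car",
    image=init_image,
    num_inference_steps=100,
    eta=0.1,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
    generator=torch.manual_seed(0),
)
edited_image = output.images[0]  # the black car recolored blue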
'''simple docstring'''
from __future__ import annotations
def peak ( lst : list[int] ) -> int:
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m] ) == 2:
            m -= 1
        return peak(lst[m:] )
    # decreasing
    else:
        if len(lst[:m] ) == 2:
            m += 1
        return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 | 0 |
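# Usage sketch for the divide-and-conquer `peak` above (assumes `peak` from the
# snippet is in scope); the list must first strictly increase and then strictly
# decrease for the O(log n) recursion to be valid:
print(peak([1, 3, 5, 7, 6, 4, 2]))  # 7
print(peak([1, 10, 9, 8]))          # 10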
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines ( lines : List[str] ) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(R'''#.*''' , '''''' , line ) # remove comments
        if line:
            filtered_lines.append(line )
    full_str = '''\n'''.join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode('''utf-8''' )
    return sha256(full_bytes ).hexdigest()
# get importable module names and hash for caching
__lowerCAmelCase = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
__lowerCAmelCase = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
__lowerCAmelCase = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
__lowerCAmelCase = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
| 706 |
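# The tables above are how `datasets` infers a loader module from a data file's
# extension. A small sketch of that lookup (`infer_module_for_file` is a helper
# name made up for this example, not part of the library API):
import os

def infer_module_for_file(path):
    # returns the (module_name, default_builder_kwargs) pair for the extension
    ext = os.path.splitext(path)[1]
    return _EXTENSION_TO_MODULE.get(ext, _EXTENSION_TO_MODULE.get(ext.lower()))

# infer_module_for_file("train.csv") -> ("csv", {})
# infer_module_for_file("data.tsv")  -> ("csv", {"sep": "\t"})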
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp( self : Optional[Any] ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Any = {'''unk_token''': '''<unk>'''}
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
    def SCREAMING_SNAKE_CASE ( self : Any , **kwargs : Any ) -> int:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **kwargs : List[Any] ) -> List[str]:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
a_ : Union[str, Any] = '''lower newer'''
a_ : List[Any] = '''lower newer'''
return input_text, output_text
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text ) # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=True ) , [0, 3_1414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=True ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
    def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
        tokenizer = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
    def SCREAMING_SNAKE_CASE ( self : str ) -> int:
        tokenizer = self.get_tokenizer()
        text = '''Encode this sequence.'''
        space_encoding = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(text , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )
        encoded = tokenizer.encode(text , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )
        tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
        encoded = tokenizer.encode(text , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )
        # Testing spaces after special tokens
        mask = '''<mask>'''
        tokenizer.add_special_tokens(
            {'''mask_token''': AddedToken(mask , lstrip=True , rstrip=False )} ) # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = '''Encode <mask> sequence'''
        sequence_nospace = '''Encode <mask>sequence'''
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = '''A, <mask> AllenNLP sentence.'''
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    tokens_r_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , add_prefix_space )
            self.assertEqual(post_processor_state['''add_prefix_space'''] , add_prefix_space )
            self.assertEqual(post_processor_state['''trim_offsets'''] , trim_offsets )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                text_of_1_token = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = f' {text}'
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
| 666 | 0 |
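# The setUp fixture above is a standard byte-level BPE pair: a vocab.json
# mapping tokens to ids plus a merges.txt of merge rules. Mirroring
# test_full_tokenizer, the toy tokenizer can be built directly from the two
# files (the paths below stand in for the files written by setUp):
from transformers import LongformerTokenizer

tokenizer = LongformerTokenizer("vocab.json", "merges.txt", unk_token="<unk>")
print(tokenizer.tokenize("lower newer"))
# ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] with the toy vocab above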
'''simple docstring'''
from __future__ import annotations
import math
__lowerCAmelCase = '2020.9.26'
__lowerCAmelCase = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_ad ( x : float , y : float , z : float , scale : float , distance : float ) -> tuple[float, float]:
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        raise TypeError(f'Input values must either be float or int: {list(locals().values() )}' )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate ( x : float , y : float , z : float , axis : str , angle : float ) -> tuple[float, float, float]:
    if not isinstance(axis , str ):
        raise TypeError('''Axis must be a str''' )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        raise TypeError(
            '''Input values except axis must either be float or int: '''
            f'{list(input_variables.values() )}' )
    angle = (angle % 3_60) / 4_50 * 1_80 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
| 707 |
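# Working the perspective-projection formula through the first call printed
# above, convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0):
x, y, z, scale, distance = 1.0, 2.0, 3.0, 10.0, 10.0
print(((x * distance) / (z + distance)) * scale)  # 100 / 13 ≈ 7.6923 (projected_x)
print(((y * distance) / (z + distance)) * scale)  # 200 / 13 ≈ 15.3846 (projected_y)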
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__lowerCAmelCase = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
__lowerCAmelCase = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
__lowerCAmelCase = '▁'
class SCREAMING_SNAKE_CASE ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self : Dict , vocab_file : str , bos_token : Dict='''<s>''' , eos_token : List[Any]='''</s>''' , sep_token : List[str]='''</s>''' , cls_token : List[str]='''<s>''' , unk_token : Dict='''<unk>''' , pad_token : int='''<pad>''' , mask_token : Tuple='''<mask>''' , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Optional[Any] , ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self : str , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self : Optional[int] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self : Union[str, Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self : Any ) -> Union[str, Any]:
        return len(self.sp_model )
    def get_vocab( self : str ) -> Tuple:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self : Optional[Any] , text : str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self : str , token : List[Any] ) -> Dict:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token( self : List[str] , index : int ) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self : str , tokens : Optional[Any] ) -> Optional[int]:
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self : Dict ) -> int:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self : Dict , d : Optional[Any] ) -> Any:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self : str , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 666 | 0 |
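# The special-token layout produced by build_inputs_with_special_tokens above
# follows the CamemBERT/RoBERTa convention. A small sketch on token strings
# instead of ids (the subwords are illustrative, not from the real vocabulary):
def layout(tokens_a, tokens_b=None):
    # mirrors the id-level logic of build_inputs_with_special_tokens
    if tokens_b is None:
        return ["<s>"] + tokens_a + ["</s>"]
    return ["<s>"] + tokens_a + ["</s>", "</s>"] + tokens_b + ["</s>"]

print(layout(["▁Bon", "jour"]))              # ['<s>', '▁Bon', 'jour', '</s>']
print(layout(["▁Bon", "jour"], ["▁Salut"]))  # ['<s>', '▁Bon', 'jour', '</s>', '</s>', '▁Salut', '</s>']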