import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
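
# Usage sketch (not part of the original test file): the same inspection helpers
# work outside pytest, given network access to the Hugging Face Hub; dataset
# names are taken from the parametrizations above.
#
#   from datasets import get_dataset_config_names, get_dataset_split_names
#
#   get_dataset_config_names("squad")               # ["plain_text"]
#   get_dataset_split_names("squad", "plain_text")  # ["train", "validation"]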
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
def __init__( self , _A="" , _A="train" ):
'''simple docstring'''
assert os.path.isdir(_A )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =os.listdir(_A )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
_SCREAMING_SNAKE_CASE =os.path.join(_A , _A )
if not os.path.isfile(_A ):
continue
self.documents.append(_A )
def __len__( self ):
'''simple docstring'''
return len(self.documents )
def __getitem__( self , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.documents[idx]
_SCREAMING_SNAKE_CASE =document_path.split('''/''' )[-1]
with open(_A , encoding='''utf-8''' ) as source:
_SCREAMING_SNAKE_CASE =source.read()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =process_story(_A )
return document_name, story_lines, summary_lines
def _lowerCAmelCase(a : str ) -> Any:
_SCREAMING_SNAKE_CASE =list(filter(lambda a : len(a ) != 0 , [line.strip() for line in raw_story.split('''\n''' )] ) )
# for some unknown reason some lines miss a period, add it
_SCREAMING_SNAKE_CASE =[_add_missing_period(a ) for line in nonempty_lines]
# gather article lines
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =deque(a )
while True:
try:
_SCREAMING_SNAKE_CASE =lines.popleft()
if element.startswith('''@highlight''' ):
break
story_lines.append(a )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
_SCREAMING_SNAKE_CASE =list(filter(lambda a : not t.startswith('''@highlight''' ) , a ) )
return story_lines, summary_lines
def _lowerCAmelCase(a : Optional[Any] ) -> Optional[int]:
_SCREAMING_SNAKE_CASE =['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u2019''', ''')''']
if line.startswith('''@highlight''' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def _lowerCAmelCase(a : Optional[int] , a : Optional[Any] , a : Tuple ) -> Union[str, Any]:
if len(a ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(a )) )
return sequence
def _lowerCAmelCase(a : Optional[int] , a : Any ) -> Dict:
_SCREAMING_SNAKE_CASE =torch.ones_like(a )
_SCREAMING_SNAKE_CASE =sequence == pad_token_id
_SCREAMING_SNAKE_CASE =0
return mask
def _lowerCAmelCase(a : List[str] , a : Union[str, Any] , a : Optional[Any] ) -> Dict:
_SCREAMING_SNAKE_CASE =[tokenizer.encode(a ) for line in story_lines]
_SCREAMING_SNAKE_CASE =[token for sentence in story_lines_token_ids for token in sentence]
_SCREAMING_SNAKE_CASE =[tokenizer.encode(a ) for line in summary_lines]
_SCREAMING_SNAKE_CASE =[token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def _lowerCAmelCase(a : Optional[int] , a : str ) -> int:
_SCREAMING_SNAKE_CASE =[]
for sequence in batch:
_SCREAMING_SNAKE_CASE =-1
_SCREAMING_SNAKE_CASE =[]
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(a )
return torch.tensor(a )
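
if __name__ == "__main__":
    # Quick self-check of the parsing helpers (added for illustration; the
    # original module has no demo block). No model or data download is needed.
    raw = "First sentence.\nSecond sentence, period missing\n@highlight\nthe summary"
    story, summary = process_story(raw)
    assert story == ["First sentence.", "Second sentence, period missing."]
    assert summary == ["the summary."]  # the missing period is added here too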
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( _a , unittest.TestCase ):
a : Tuple = DanceDiffusionPipeline
a : Optional[Any] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
a : str = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
a : List[Any] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
a : Union[str, Any] = False
a : str = False
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=A_ , use_timestep_embedding=A_ , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
__lowercase = IPNDMScheduler()
__lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : List[str] , A_ : Tuple , A_ : List[str]=0 ):
'''simple docstring'''
if str(A_ ).startswith("""mps""" ):
__lowercase = torch.manual_seed(A_ )
else:
__lowercase = torch.Generator(device=A_ ).manual_seed(A_ )
__lowercase = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
__lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = DanceDiffusionPipeline(**A_ )
__lowercase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__lowercase = self.get_dummy_inputs(A_ )
__lowercase = pipe(**A_ )
__lowercase = output.audios
__lowercase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
__lowercase = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = torch_device
__lowercase = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
__lowercase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(generator=A_ , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
__lowercase = output.audios
__lowercase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__lowercase = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
__lowercase = torch_device
__lowercase = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
__lowercase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(generator=A_ , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
__lowercase = output.audios
__lowercase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__lowercase = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
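
# Outside the test-suite, the pipeline is a few lines (checkpoint id taken from
# the slow tests above; a GPU and a model download are assumed):
#
#   import torch
#   from diffusers import DanceDiffusionPipeline
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
#   audio = pipe(num_inference_steps=100, audio_length_in_s=4.096).audios[0]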
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowerCAmelCase_ ( UpperCamelCase__ : Callable , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ):
"""simple docstring"""
__lowercase = int(np.ceil((x_end - xa) / step_size ) )
__lowercase = np.zeros((n + 1,) )
__lowercase = ya
__lowercase = xa
for k in range(UpperCamelCase__ ):
__lowercase = y[k] + step_size * ode_func(UpperCamelCase__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
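
    # Worked example (added for illustration): y' = y, y(0) = 1 on [0, 1].
    # With h = 0.01 the method returns (1 + h)**100, roughly 2.7048, about 1%
    # below the exact value e = 2.71828..., consistent with first-order accuracy.
    ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(ys[-1])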
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a : Tuple = {
'''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
'''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = ['''MaskFormerFeatureExtractor''']
a : Dict = ['''MaskFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
a : List[Any] = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
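
# Usage note (illustrative, not from the original file): with the lazy module in
# place, importing the package stays cheap because the heavy torch/vision
# submodules only load on first attribute access. For example:
#
#   from transformers import MaskFormerConfig
#
#   config = MaskFormerConfig()  # triggers the real configuration_maskformer import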
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class a_ ( unittest.TestCase ):
def _snake_case ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
_UpperCAmelCase = DisjunctiveConstraint(__UpperCamelCase )
self.assertTrue(isinstance(dc.token_ids , __UpperCamelCase ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(__UpperCamelCase ) # fails here
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = [[1, 2, 3], [1, 2, 4]]
_UpperCAmelCase = DisjunctiveConstraint(__UpperCamelCase )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(1 )
_UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(2 )
_UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(3 )
_UpperCAmelCase = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _snake_case ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_UpperCAmelCase = DisjunctiveConstraint(__UpperCamelCase )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
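
# In real use, DisjunctiveConstraint feeds constrained beam search. A sketch
# (the `constraints=` argument to `generate()` and the model id are assumptions
# here, not exercised by the tests above):
#
#   from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#   from transformers.generation import DisjunctiveConstraint
#
#   tok = AutoTokenizer.from_pretrained("t5-small")
#   model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#   # "generate either of these phrasings, whichever scores better"
#   flexible = [tok("rain", add_special_tokens=False).input_ids,
#               tok("raining", add_special_tokens=False).input_ids]
#   inputs = tok("translate English to German: it rains", return_tensors="pt")
#   out = model.generate(**inputs, constraints=[DisjunctiveConstraint(flexible)], num_beams=4)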
from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ-Diffusion (discrete diffusion over VQ-VAE codes)."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (set to log(0)) the lowest-probability classes so that the kept
        classes in each column hold at least `truncation_rate` of the probability mass."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # undo the sort so the mask lines up with the original class order
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
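
# Usage sketch (illustrative; the checkpoint id is an assumption about what is
# published on the Hub, and the download is large):
#
#   from diffusers import VQDiffusionPipeline
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool", num_inference_steps=50).images[0]
#   image.save("teddy_bear.png")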
import unittest

from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectron2,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None


# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)


@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )

    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
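
# Outside the test-suite, the same pipeline is a one-liner (model id taken from
# the layoutlm tests above; pytesseract must be installed so the pipeline can
# OCR the document image):
#
#   from transformers import pipeline
#
#   dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#   dqa(image=INVOICE_URL, question="What is the invoice number?")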
"""Bifid cipher: a Polybius square combined with a fractionating transposition.

https://en.wikipedia.org/wiki/Bifid_cipher
"""
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) pair of `letter` in the Polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-based (row, column) position in the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Encode `message`; spaces are dropped and 'j' is merged into 'i'."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # Write the row indices above the column indices, one column per letter.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Read the grid row by row (the fractionating transposition step), then
        # map consecutive digit pairs back to letters.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Invert `encode` (the input is assumed to contain no spaces or 'j')."""
        message = message.lower()
        message = message.replace(" ", "")  # the original left this call unassigned (a no-op)

        # Lay the ciphertext digit pairs out flat, restoring the row-by-row stream.
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # Fold the stream back into two rows: row 0 holds the plaintext row
        # indices, row 1 the plaintext column indices.
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
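
if __name__ == "__main__":
    # Quick round-trip self-check (added for illustration; the original module
    # has no demo block). Bifid decode inverts encode once spaces are removed
    # and "j" is folded into "i".
    cipher = BifidCipher()
    ciphertext = cipher.encode("test message")
    assert cipher.decode(ciphertext) == "testmessage"
    print(ciphertext)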
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , A_ , A_=100 , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=4 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=None , A_=[0, 1, 2, 3] , )-> Any:
'''simple docstring'''
UpperCamelCase = parent
UpperCamelCase = 100
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = scope
UpperCamelCase = out_indices
UpperCamelCase = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase = (image_size // patch_size) ** 2
UpperCamelCase = num_patches + 1
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> List[str]:
'''simple docstring'''
UpperCamelCase = BeitModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = BeitForMaskedImageModeling(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.type_sequence_label_size
UpperCamelCase = BeitForImageClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = BeitForImageClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = self.num_labels
UpperCamelCase = BeitForSemanticSegmentation(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , unittest.TestCase):
lowerCAmelCase_ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{
"""feature-extraction""": BeitModel,
"""image-classification""": BeitForImageClassification,
"""image-segmentation""": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = BeitModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A_ )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(A_ ), BeitForMaskedImageModeling]:
continue
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.train()
UpperCamelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase = model(**A_ ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase = False
UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(A_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCamelCase = model_class(A_ )
model.gradient_checkpointing_enable()
model.to(A_ )
model.train()
UpperCamelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase = model(**A_ ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = _config_zero_init(A_ )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(config=A_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = BeitModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def A_( ):
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
@cached_property
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(A_ )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
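# Post-processing sketch (illustrative): `post_process_semantic_segmentation`
# returns one (H, W) label map per image; when `target_sizes` is given, the
# logits are resized back to those resolutions before the per-pixel argmax.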
| 3 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case__ : Optional[Any] = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = ['''MobileViTFeatureExtractor''']
snake_case__ : Optional[Any] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Any = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Optional[int] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
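# Usage sketch (illustrative): with the lazy module above, `from transformers
# import MobileViTModel` only imports `modeling_mobilevit` (and torch) at the
# moment the attribute is resolved, keeping `import transformers` fast.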
| 637 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : int = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class snake_case_( a__ ):
__UpperCamelCase = '''levit'''
def __init__( self : str , UpperCamelCase_ : Union[str, Any]=2_2_4 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : Tuple=1_6 , UpperCamelCase_ : Dict=[1_2_8, 2_5_6, 3_8_4] , UpperCamelCase_ : Optional[Any]=[4, 8, 1_2] , UpperCamelCase_ : Dict=[4, 4, 4] , UpperCamelCase_ : Any=[1_6, 1_6, 1_6] , UpperCamelCase_ : str=0 , UpperCamelCase_ : int=[2, 2, 2] , UpperCamelCase_ : Optional[Any]=[2, 2, 2] , UpperCamelCase_ : str=0.02 , **UpperCamelCase_ : List[str] , ):
super().__init__(**UpperCamelCase_ )
lowerCAmelCase : Tuple = image_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : Optional[int] = kernel_size
lowerCAmelCase : Dict = stride
lowerCAmelCase : List[Any] = padding
lowerCAmelCase : Dict = hidden_sizes
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : Tuple = depths
lowerCAmelCase : Dict = key_dim
lowerCAmelCase : Union[str, Any] = drop_path_rate
lowerCAmelCase : List[Any] = patch_size
lowerCAmelCase : Tuple = attention_ratio
lowerCAmelCase : Optional[int] = mlp_ratio
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : List[str] = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class snake_case_( a__ ):
__UpperCamelCase = version.parse('''1.11''' )
@property
def lowerCamelCase__ ( self : Tuple ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase__ ( self : Optional[Any] ):
return 1E-4
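# Illustrative usage (not part of the upstream file):
#     config = LevitConfig()             # LeViT-128S-style defaults
#     onnx_config = LevitOnnxConfig(config)
#     list(onnx_config.inputs)           # -> ["pixel_values"]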
| 637 | 1 |
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
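# Shape sketch (illustrative, using the defaults above): image_embeddings of
# shape (B, 768) yield `clip_extra_context_tokens` = 4 extra tokens of width
# `cross_attention_dim`, prepended to the projected text encoder hidden states.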
| 524 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')


# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    """Split a camel-cased name into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def get_frameworks_table():
    """Build a table of every model type with the frameworks and processor class supporting it."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure each model type is matched to a processing class.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)


def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table


def update_metadata(token, commit_sha):
    """Update the metadata for the Transformers repo."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )


def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
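# Illustrative behavior of `camel_case_split` defined above (not a stored doctest):
#     camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]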
| 524 | 1 |
class EditDistance:
    """
    Use:
        solver = EditDistance()
        result = solver.min_dist_top_down(first_string, second_string)
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif self.word1[i - 1] == self.word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 599 |
import copy
import tempfile
import unittest

from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
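# Note: GenerationConfig.update(**kwargs) only applies keys that are existing
# attributes and returns the leftovers as a dict, which is exactly what
# test_update above exercises.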
| 599 | 1 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ , snake_case_=1_024 , snake_case_=1_024 , snake_case_=3.6 ) -> int:
__lowerCAmelCase = tokenizer
__lowerCAmelCase = tokenizer.bos_token_id
__lowerCAmelCase = dataset
__lowerCAmelCase = seq_length
__lowerCAmelCase = seq_length * chars_per_token * num_of_sequences
def __iter__( self ) -> Optional[int]:
__lowerCAmelCase = iter(self.dataset )
__lowerCAmelCase = True
while more_examples:
__lowerCAmelCase , __lowerCAmelCase = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(snake_case_ )["""content"""] )
buffer_len += len(buffer[-1] )
except StopIteration:
__lowerCAmelCase = False
break
__lowerCAmelCase = tokenizer(snake_case_ , truncation=snake_case_ )["""input_ids"""]
__lowerCAmelCase = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(snake_case_ ) , self.seq_length ):
__lowerCAmelCase = all_token_ids[i : i + self.seq_length]
if len(snake_case_ ) == self.seq_length:
yield torch.tensor(snake_case_ )
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = {"""streaming""": True}
__lowerCAmelCase = load_dataset(args.dataset_name , split="""train""" , **_lowerCAmelCase )
__lowerCAmelCase = ConstantLengthDataset(_lowerCAmelCase , _lowerCAmelCase , seq_length=args.seq_length )
__lowerCAmelCase = DataLoader(_lowerCAmelCase , batch_size=args.batch_size )
return eval_dataloader
def lowercase (_lowerCAmelCase ):
model.eval()
__lowerCAmelCase = []
for step, batch in enumerate(_lowerCAmelCase ):
with torch.no_grad():
__lowerCAmelCase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
__lowerCAmelCase = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(_lowerCAmelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
__lowerCAmelCase = torch.mean(torch.cat(_lowerCAmelCase ) )
try:
__lowerCAmelCase = torch.exp(_lowerCAmelCase )
except OverflowError:
__lowerCAmelCase = float("""inf""" )
return loss.item(), perplexity.item()
# Setup Accelerator
SCREAMING_SNAKE_CASE_ = Accelerator()
# Parse configuration
SCREAMING_SNAKE_CASE_ = HfArgumentParser(EvaluationArguments)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
set_seed(args.seed)
# Logging
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Load model and tokenizer
SCREAMING_SNAKE_CASE_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
SCREAMING_SNAKE_CASE_ = create_dataloader(args)
# Prepare everything with our `accelerator`.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = evaluate(args)
logger.info(F"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 465 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = '''efficientformer'''
def __init__( self , snake_case_ = [3, 2, 6, 4] , snake_case_ = [48, 96, 224, 448] , snake_case_ = [True, True, True, True] , snake_case_ = 448 , snake_case_ = 32 , snake_case_ = 4 , snake_case_ = 7 , snake_case_ = 5 , snake_case_ = 8 , snake_case_ = 4 , snake_case_ = 0.0 , snake_case_ = 16 , snake_case_ = 3 , snake_case_ = 3 , snake_case_ = 3 , snake_case_ = 2 , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = 1 , snake_case_ = True , snake_case_ = True , snake_case_ = 1e-5 , snake_case_ = "gelu" , snake_case_ = 0.02 , snake_case_ = 1e-1_2 , snake_case_ = 224 , snake_case_ = 1e-0_5 , **snake_case_ , ) -> None:
super().__init__(**snake_case_ )
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = depths
__lowerCAmelCase = mlp_expansion_ratio
__lowerCAmelCase = downsamples
__lowerCAmelCase = dim
__lowerCAmelCase = key_dim
__lowerCAmelCase = attention_ratio
__lowerCAmelCase = resolution
__lowerCAmelCase = pool_size
__lowerCAmelCase = downsample_patch_size
__lowerCAmelCase = downsample_stride
__lowerCAmelCase = downsample_pad
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = num_metaad_blocks
__lowerCAmelCase = distillation
__lowerCAmelCase = use_layer_scale
__lowerCAmelCase = layer_scale_init_value
__lowerCAmelCase = image_size
__lowerCAmelCase = batch_norm_eps
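# Illustrative usage (not part of the upstream file):
#     config = EfficientFormerConfig()   # EfficientFormer-L1 style defaults
#     config.hidden_sizes                # -> [48, 96, 224, 448]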
| 465 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    """Construct an ALBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable; it is re-created in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
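# Illustrative usage (assumes a local SentencePiece model file):
#     tokenizer = AlbertTokenizer(vocab_file="spiece.model")
#     tokenizer.tokenize("Hello, world!")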
| 712 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for verifying that `accelerate launch` works correctly.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())


class TpuConfigTester(unittest.TestCase):
    """
    Test case for verifying that `accelerate tpu-config` constructs the right `gcloud` command.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
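# These assertions rely on `--debug`, which makes `accelerate tpu-config` print
# the gcloud command it would run instead of executing it, so the tests can
# inspect the constructed command line from stdout.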
| 501 | 0 |
"""simple docstring"""
import argparse
import datetime
def __snake_case ( SCREAMING_SNAKE_CASE__ : str ) -> str:
'''simple docstring'''
_UpperCAmelCase : List[Any] = {
"0": "Sunday",
"1": "Monday",
"2": "Tuesday",
"3": "Wednesday",
"4": "Thursday",
"5": "Friday",
"6": "Saturday",
}
_UpperCAmelCase : int = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(SCREAMING_SNAKE_CASE__ ) < 11:
raise ValueError("Must be 10 characters long" )
# Get month
_UpperCAmelCase : int = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError("Month must be between 1 - 12" )
_UpperCAmelCase : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'" )
# Get day
_UpperCAmelCase : int = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError("Date must be between 1 - 31" )
# Get second separator
_UpperCAmelCase : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'" )
# Get year
_UpperCAmelCase : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8_500:
raise ValueError(
"Year out of range. There has to be some sort of limit...right?" )
# Get datetime obj for validation
_UpperCAmelCase : Tuple = datetime.date(int(SCREAMING_SNAKE_CASE__ ) , int(SCREAMING_SNAKE_CASE__ ) , int(SCREAMING_SNAKE_CASE__ ) )
# Start math
if m <= 2:
_UpperCAmelCase : int = y - 1
_UpperCAmelCase : Optional[int] = m + 12
# maths var
_UpperCAmelCase : int = int(str(SCREAMING_SNAKE_CASE__ )[:2] )
_UpperCAmelCase : int = int(str(SCREAMING_SNAKE_CASE__ )[2:] )
_UpperCAmelCase : int = int(2.6 * m - 5.39 )
_UpperCAmelCase : int = int(c / 4 )
_UpperCAmelCase : int = int(k / 4 )
_UpperCAmelCase : int = int(d + k )
_UpperCAmelCase : int = int(t + u + v + x )
_UpperCAmelCase : int = int(z - (2 * c) )
_UpperCAmelCase : int = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("The date was evaluated incorrectly. Contact developer." )
# Response
_UpperCAmelCase : str = f'Your date {date_input}, is a {days[str(SCREAMING_SNAKE_CASE__ )]}!'
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Any = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
zeller(args.date_input)
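# Illustrative call (not a stored doctest):
#     zeller("01-31-2010") -> 'Your date 01-31-2010, is a Sunday!'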
| 289 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
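# Note: GPT-2's byte-level BPE treats a leading space as part of the following
# word, so `add_prefix_space=True` is required for pretokenized
# (is_split_into_words=True) inputs; the two encode overrides above enforce it.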
| 289 | 1 |
def solution() -> int:
    """Project Euler problem 19: count how many Sundays fell on the first of
    the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

    day = 6
    month = 1
    year = 1901

    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
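# For reference, the published Project Euler answer for this problem is 171.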
| 400 |
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}


if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 400 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase__ : Dict = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 98 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    # NOTE: the original class name was lost in this copy; it is reconstructed
    # from the backend set below, so treat the name as an assumption.
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
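# Behavior sketch (assuming the standard DummyObject semantics, and the
# reconstructed class name above): instantiating or loading the placeholder
# without the listed backends installed raises an ImportError naming them:
#     SpectrogramDiffusionPipeline.from_pretrained("some/repo")
#     # -> ImportError unless transformers, torch and note_seq are available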
| 98 | 1 |
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
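# Sanity check (a sketch, not part of the original script): for the classic
# CLRS dimensions [30, 35, 15, 5, 10, 20, 25] used in main(), the minimum
# number of scalar multiplications is 15125 and one optimal parenthesization
# is ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ):
#     matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
#     assert matrix[1][6] == 15125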
| 721 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed with NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Pure-Python Euclidean distance (no NumPy)."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
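# Worked example (sketch): for [1, 2, 3] and [4, 5, 6] the squared differences
# sum to 9 + 9 + 9 = 27, so both functions return sqrt(27) ~= 5.196152422706632.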
if __name__ == "__main__":
    def benchmark() -> None:
"""simple docstring"""
from timeit import timeit
print('''Without Numpy''')
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ))
print('''With Numpy''')
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ))
benchmark()
| 352 | 0 |
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
UpperCamelCase_ = re.compile(r"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
UpperCamelCase_ = re.compile(r"""\s*\(\s*\"(\S[^\"]+)\"""")
def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : bool = False ) -> List[str]:
with open(lowercase__ , """r""" , encoding="""utf-8""" ) as f:
_lowerCAmelCase : List[str] = f.read()
_lowerCAmelCase : Optional[int] = content.split("""\n""" )
_lowerCAmelCase : Any = []
_lowerCAmelCase : List[str] = 0
while line_idx < len(lowercase__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
_lowerCAmelCase : Union[str, Any] = len(re.search(R"""^(\s*)\S""" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(""" """ * indent + """(""" ):
new_lines.append(lines[line_idx] )
line_idx += 1
_lowerCAmelCase : List[Any] = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
_lowerCAmelCase : Tuple = line_idx
while not lines[line_idx].startswith(""" """ * indent + """)""" ):
line_idx += 1
blocks.append("""\n""".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
_lowerCAmelCase : Dict = sorted(lowercase__ , key=lambda _lowerCamelCase : _re_identifier.search(lowercase__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f:
f.write("""\n""".join(lowercase__ ) )
elif "\n".join(lowercase__ ) != content:
return True
def _UpperCAmelCase ( _lowerCamelCase : bool = False ) -> Tuple:
_lowerCAmelCase : int = [os.path.join(lowercase__ , lowercase__ ) for f in os.listdir(lowercase__ ) if f.endswith(""".py""" )]
_lowerCAmelCase : List[Any] = [sort_auto_mapping(lowercase__ , overwrite=lowercase__ ) for fname in fnames]
if not overwrite and any(lowercase__ ):
_lowerCAmelCase : List[str] = [f for f, d in zip(lowercase__ , lowercase__ ) if d]
raise ValueError(
f'The following files have auto mappings that need sorting: {", ".join(lowercase__ )}. Run `make style` to fix'
""" this.""" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
UpperCamelCase_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
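# Illustration (a sketch, not part of the script): given a mapping such as
#     MODEL_MAPPING_NAMES = OrderedDict(
#         [
#             ("bert", ...),
#             ("albert", ...),
#         ]
#     )
# the entries are re-sorted alphabetically by their identifier, so "albert"
# moves above "bert"; running without --check_only writes the fix back to disk.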
| 384 |
'''simple docstring'''
def average_absolute_deviation(nums: list[int]) -> float:
    """Return the average absolute deviation of a list of numbers."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
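# Worked example (sketch): for [1, 2, 3, 4] the mean is 2.5 and the absolute
# deviations are 1.5, 0.5, 0.5, 1.5, so
# average_absolute_deviation([1, 2, 3, 4]) == (1.5 + 0.5 + 0.5 + 1.5) / 4 == 1.0.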
| 119 | 0 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'''
_DESCRIPTION = '''\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'''
_KWARGS_DESCRIPTION = '''\nCalculates how good predictions are given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidate should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'''
_WARNING = '''\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'''
_LICENSE = '''The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
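# Worked example (sketch): with n = 5 completions of which c = 2 pass,
# estimate_pass_at_k(5, np.array([2]), 1) = 1 - (3/4) * (4/5) = 0.4, the chance
# that one random draw passes; for k = 4 it is 1.0 because n - c = 3 < k.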
| 714 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: "List[PatchingSpec]" = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: "Optional[TensorType]" = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
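# Shape sketch for the dummy past_key_values above (illustrative numbers, not a
# required configuration): with batch=2, n_head=8, head_dim=hidden_size//n_head=8
# and seqlen=3, past_key_values_length is 3 + 2 = 5, so every past key has shape
# (2 * 8, 8, 5) and every past value has shape (2 * 8, 5, 8) -- BLOOM keeps the
# dynamic sequence axis in position 2 for keys, hence inverted_values_shape=True.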
| 290 | 0 |
"""simple docstring"""
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        """Print the adjacency list of every vertex."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 361 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks=None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
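# Usage sketch (dataset name and URL are hypothetical): the mock manager maps
# remote URLs to files packed inside the dataset's local dummy_data.zip rather
# than hitting the network:
#     dl_manager = MockDownloadManager("squad", None, "1.0.0", use_local_dummy_data=True)
#     local_path = dl_manager.download_and_extract("https://example.com/train.json")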
| 361 | 1 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]

    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
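# Worked value (sketch): for the scores in main(), the maxima at depth 2 are
# 90, 33, 65 and 34423; the minimizing layer reduces them to 33 and 65; the
# maximizing root picks 65, so the script prints "Optimal value : 65".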
| 225 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65_024,
        hidden_size=4_544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
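# Usage sketch: with the defaults above, hidden_size=4544 and
# num_attention_heads=71 give a per-head dimension of exactly 64:
#     config = FalconConfig()
#     assert config.head_dim == 64
#     assert config.rotary  # rotary embeddings are used because alibi is False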
| 225 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1_024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 69 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    # NOTE: the original class name was lost in this copy; MobileNetV2 is
    # inferred from the defaults and the semantic-segmentation post-processing
    # below, so treat the name as an assumption.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
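    # Hedged usage sketch (`image` stands for any PIL image; the class name is
    # itself reconstructed above): the default pipeline resizes the shortest
    # edge to 256, center-crops to 224x224, rescales and normalizes:
    #     processor = MobileNetV2ImageProcessor()
    #     inputs = processor(images=image, return_tensors="pt")
    #     inputs["pixel_values"].shape  # torch.Size([1, 3, 224, 224])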
| 20 | 0 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register the error to raise when an uninstalled backend's formatter is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, """jax""", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Get a formatter object from its name, raising if its backend is unavailable."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
| 522 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 522 | 1 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
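    # The rule the tests above encode, in one sentence: a checkpoint layout is
    # "safetensors compatible" when every PyTorch .bin weight file has a
    # matching .safetensors counterpart (per component, and per variant such as
    # "fp16" when one is requested); a lone .bin with no counterpart fails, e.g.
    # (sketch) is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"]) -> False.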
| 676 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()

results = {}
# fmt: off
# NOTE: in the original script each tensor below is stored in `results` under a
# key derived from a model id (e.g. "google_ddpm_cifar10_32"); the exact
# key-to-tensor mapping was lost in this copy and is intentionally not guessed,
# so the assignments below still bind a single throwaway name.
lowercase__ = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
lowercase__ = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
lowercase__ = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
lowercase__ = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
lowercase__ = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
lowercase__ = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
lowercase__ = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
lowercase__ = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
lowercase__ = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
lowercase__ = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
lowercase__ = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
lowercase__ = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
lowercase__ = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
lowercase__ = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
lowercase__ = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
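# Each tensor above holds the expected first 30 output logits used as a
# regression reference when checking one converted checkpoint below.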
models = api.list_models(filter='''diffusers''')
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
        print(f"""Started running {mod.modelId}!!!""")
        if mod.modelId.startswith('''CompVis'''):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder='''unet''')
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 508 | 0 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
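    # Example invocation (script name and output path are illustrative):
    #   python this_script.py --dump_path ./unclip-image-variation --txt2img_unclip kakaobrain/karlo-v1-alpha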
    args = parser.parse_args()

    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 11 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> from PIL import Image\n        >>> import torch\n        >>> from diffusers import DiffusionPipeline\n        >>> from diffusers.utils import export_to_gif, load_image\n\n        >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n        >>> repo = "openai/shap-e-img2img"\n        >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n        >>> pipe = pipe.to(device)\n\n        >>> guidance_scale = 3.0\n        >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n        >>> image = load_image(image_url).convert("RGB")\n\n        >>> images = pipe(\n        ...     image,\n        ...     guidance_scale=guidance_scale,\n        ...     num_inference_steps=64,\n        ...     frame_size=256,\n        ... ).images\n\n        >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n        ```\n'
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]


class UpperCAmelCase__(DiffusionPipeline):
    def __init__( self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer ):
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer )
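        # register_modules stores each component as an attribute and records it in the
        # pipeline config, so save_pretrained/from_pretrained can round-trip all modules.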
    def prepare_latents( self, shape, dtype, device, generator, latents, scheduler ):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self, gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f'cuda:{gpu_id}')
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device( self ):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image( self, image, device, num_images_per_prompt, do_classifier_free_guidance ):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__( self, image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_images_per_prompt: int = 1, num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, guidance_scale: float = 4.0, frame_size: int = 64, output_type: Optional[str] = "pil", return_dict: bool = True ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}' )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2 )  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128 )
            images.append(image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}')
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
| 11 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
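# Finds the grid area whose rectangle count is closest to the target
# (Project Euler 85): a w x h grid contains T(w) * T(h) rectangles,
# where T(n) = n * (n + 1) / 2 is the n-th triangle number.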
def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(f'''{solution() = }''')
| 4 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
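# betas_for_alpha_bar (below) discretizes a continuous alpha_bar(t) curve: each beta
# is chosen so that the cumulative product of (1 - beta) tracks alpha_bar, capped at max_beta.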
def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine" ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}')
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class snake_case_ (SchedulerMixin, ConfigMixin):
    order = 1
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get('set_alpha_to_one', None) is not None:
            deprecation_message = (
                'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'
            )
            deprecate('set_alpha_to_one', '1.0.0', deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs['set_alpha_to_one']
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}')
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input( self, sample: torch.FloatTensor, timestep: Optional[int] = None ) -> torch.FloatTensor:
        return sample
    def set_timesteps( self, num_inference_steps: int, device: Union[str, torch.device] = None ):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'
                f' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'
                f' maximal {self.config.num_train_timesteps} timesteps.' )
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = False, variance_noise: Optional[torch.FloatTensor] = None, return_dict: bool = True ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'
                ' `v_prediction`' )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def __len__( self :Tuple ) -> Optional[int]:
return self.config.num_train_timesteps
| 335 | 0 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
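# to_atuple (below) normalizes a scalar size into a pair; iterables pass through unchanged.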
def to_atuple( x ):
    '''simple docstring'''
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
def __lowerCAmelCase( self : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
pass
def __lowerCAmelCase( self : List[str] ):
'''simple docstring'''
pass
def __lowerCAmelCase( self : int ):
'''simple docstring'''
pass
    def check_model_from_pretrained_configs( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ):
        '''simple docstring'''
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["""text_embeds"""].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["""image_embeds"""].shape, (pixel_values.shape[0], config.projection_dim))
def __lowerCAmelCase( self : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : Any ):
'''simple docstring'''
snake_case__ , snake_case__ = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase )
snake_case__ = TFVisionTextDualEncoderModel(vision_model=__UpperCamelCase , text_model=__UpperCamelCase )
snake_case__ = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __lowerCAmelCase( self : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any]=None , **__UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case__ , snake_case__ = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase )
snake_case__ = {"""vision_model""": vision_model, """text_model""": text_model}
snake_case__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase )
snake_case__ = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __lowerCAmelCase( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any]=None , **__UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case__ , snake_case__ = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase )
snake_case__ = TFVisionTextDualEncoderModel(vision_model=__UpperCamelCase , text_model=__UpperCamelCase )
snake_case__ = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
snake_case__ = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
snake_case__ = TFVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase )
snake_case__ = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
snake_case__ = after_output[0].numpy()
snake_case__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
    def check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) )
    def assert_almost_equals( self, a: np.ndarray, b: np.ndarray, tol: float ):
        '''simple docstring'''
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"""Difference between torch and flax is {diff} (>= {tol}).""")
    def test_vision_text_dual_encoder_model( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)
    def test_model_from_pretrained_configs( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)
    def test_vision_text_dual_encoder_from_pretrained( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)
    def test_save_load( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)
    def test_vision_text_output_attention( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
@slow
    def test_pretrained_model_and_inputs( self ):
        '''simple docstring'''
        model_a, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_a(**inputs)
        out_1 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(tmp_dirname)
            model_a = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_a(**inputs)
            out_2 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1E-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs( self ):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model( self, vision_config, text_config ):
        '''simple docstring'''
        vision_model = TFViTModel(vision_config, name="""vision_model""")
        text_model = TFBertModel(text_config, name="""text_model""")
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs( self ):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) )
    def get_vision_text_model( self, vision_config, text_config ):
        '''simple docstring'''
        vision_model = TFDeiTModel(vision_config, name="""vision_model""")
        text_model = TFRobertaModel(text_config, name="""text_model""")
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs( self ):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model( self, vision_config, text_config ):
        '''simple docstring'''
        vision_model = TFCLIPVisionModel(vision_config, name="""vision_model""")
        text_model = TFBertModel(text_config, name="""text_model""")
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference( self ):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_pretrained(
            """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=True )
        processor = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""")
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        inputs = processor(
            text=["""una foto di un gatto""", """una foto di un cane"""] , images=image , padding=True , return_tensors="""np""" )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , expected_logits , atol=1E-3 ) )
| 566 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {'''vocab_file''': '''sentencepiece.model'''}
a__ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
}
a__ = {
'''google/rembert''': 256,
}
class __magic_name__( __lowerCAmelCase ):
UpperCAmelCase_ : str = VOCAB_FILES_NAMES
UpperCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs ):
        '''simple docstring'''
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
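    # The SentencePiece processor is not picklable, so __getstate__/__setstate__ below
    # drop it from the state and reload it from the vocab file after unpickling.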
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model)
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self, d ):
        '''simple docstring'''
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)
    def _tokenize( self, text, sample=False ):
        '''simple docstring'''
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces
    def _convert_token_to_id( self, token ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token( self, index ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string( self, tokens ):
        '''simple docstring'''
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string
    def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        return (out_vocab_file,)
| 566 | 1 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
a_ : List[str] = sys.version_info >= (3, 1_0)
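# list_field (below) wraps dataclasses.field with a default_factory so each
# instance gets its own fresh list rather than sharing one mutable default.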
def list_field( default=None, metadata=None ):
    """simple docstring"""
    return field(default_factory=lambda: default , metadata=metadata)
@dataclass
class BasicExample:
    """simple docstring"""
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    """simple docstring"""
    foo: int = 42
    baz: str = field(default='toto' , metadata={'help': 'help message'})
@dataclass
class WithDefaultBoolExample:
    """simple docstring"""
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    """simple docstring"""
    titi = """titi"""
    toto = """toto"""
class MixedTypeEnum(Enum):
    """simple docstring"""
    titi = """titi"""
    toto = """toto"""
    fourtytwo = 42
@dataclass
class EnumExample:
    """simple docstring"""
    foo: BasicEnum = "toto"
    def __post_init__(self ):
        '''simple docstring'''
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    """simple docstring"""
    foo: MixedTypeEnum = "toto"
    def __post_init__(self ):
        '''simple docstring'''
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    """simple docstring"""
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None , metadata={'help': 'help message'})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    """simple docstring"""
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    """simple docstring"""
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()
    def __post_init__(self ):
        '''simple docstring'''
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    """simple docstring"""
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default='toto' , metadata={'help': 'help message'})
    foo_str: "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'])
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePep604:
        """simple docstring"""
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
    @dataclass
    class OptionalExamplePep604:
        """simple docstring"""
        foo: int | None = None
        bar: float | None = field(default=None , metadata={'help': 'help message'})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
    def argparsersEqual(self , a , b ):
        '''simple docstring'''
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices" , None ) and yy.get("choices" , None ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice ) , yy["type"](expected_choice ) )
                del xx["type"], yy["type"]
            self.assertEqual(xx , yy )
    def test_basic(self ):
        '''simple docstring'''
        parser = HfArgumentParser(BasicExample )
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo" , type=int , required=True )
        expected.add_argument("--bar" , type=float , required=True )
        expected.add_argument("--baz" , type=str , required=True )
        expected.add_argument("--flag" , type=string_to_bool , default=False , const=True , nargs="?" )
        self.argparsersEqual(parser , expected )
        args_string = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args_string , look_for_args_file=False )
        self.assertFalse(example.flag )
def _a (self ):
'''simple docstring'''
lowerCamelCase = HfArgumentParser(UpperCamelCase__ )
lowerCamelCase = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=UpperCamelCase__ )
expected.add_argument("--baz" , default="toto" , type=UpperCamelCase__ , help="help message" )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
def _a (self ):
'''simple docstring'''
lowerCamelCase = argparse.ArgumentParser()
expected.add_argument("--foo" , type=UpperCamelCase__ , default=UpperCamelCase__ , const=UpperCamelCase__ , nargs="?" )
expected.add_argument("--baz" , type=UpperCamelCase__ , default=UpperCamelCase__ , const=UpperCamelCase__ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=UpperCamelCase__ , dest="baz" )
expected.add_argument("--opt" , type=UpperCamelCase__ , default=UpperCamelCase__ )
lowerCamelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCamelCase__ )
for dataclass_type in dataclass_types:
lowerCamelCase = HfArgumentParser(UpperCamelCase__ )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase = parser.parse_args([] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__ ) )
lowerCamelCase = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__ ) )
lowerCamelCase = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__ ) )
lowerCamelCase = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__ ) )
lowerCamelCase = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , baz=UpperCamelCase__ , opt=UpperCamelCase__ ) )
def _a (self ):
'''simple docstring'''
lowerCamelCase = HfArgumentParser(UpperCamelCase__ )
lowerCamelCase = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCamelCase = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCamelCase = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCamelCase = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCamelCase = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
lowerCamelCase = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _a (self ):
'''simple docstring'''
@dataclass
class lowerCamelCase__ :
"""simple docstring"""
_A = "toto"
lowerCamelCase = HfArgumentParser(UpperCamelCase__ )
lowerCamelCase = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCamelCase = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCamelCase = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def _a (self ):
'''simple docstring'''
lowerCamelCase = HfArgumentParser(UpperCamelCase__ )
lowerCamelCase = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=UpperCamelCase__ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=UpperCamelCase__ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=UpperCamelCase__ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=UpperCamelCase__ )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase = parser.parse_args([] )
self.assertEqual(
UpperCamelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCamelCase = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(UpperCamelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def _a (self ):
'''simple docstring'''
lowerCamelCase = argparse.ArgumentParser()
expected.add_argument("--foo" , default=UpperCamelCase__ , type=UpperCamelCase__ )
expected.add_argument("--bar" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="help message" )
expected.add_argument("--baz" , default=UpperCamelCase__ , type=UpperCamelCase__ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=UpperCamelCase__ )
expected.add_argument("--des" , nargs="+" , default=[] , type=UpperCamelCase__ )
lowerCamelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCamelCase__ )
for dataclass_type in dataclass_types:
lowerCamelCase = HfArgumentParser(UpperCamelCase__ )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase = parser.parse_args([] )
self.assertEqual(UpperCamelCase__ , Namespace(foo=UpperCamelCase__ , bar=UpperCamelCase__ , baz=UpperCamelCase__ , ces=[] , des=[] ) )
lowerCamelCase = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(UpperCamelCase__ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def _a (self ):
'''simple docstring'''
lowerCamelCase = HfArgumentParser(UpperCamelCase__ )
lowerCamelCase = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=UpperCamelCase__ , required=UpperCamelCase__ )
expected.add_argument("--required_str" , type=UpperCamelCase__ , required=UpperCamelCase__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=UpperCamelCase__ , )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
def _a (self ):
'''simple docstring'''
lowerCamelCase = HfArgumentParser(UpperCamelCase__ )
lowerCamelCase = argparse.ArgumentParser()
expected.add_argument("--foo" , type=UpperCamelCase__ , required=UpperCamelCase__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=UpperCamelCase__ , )
expected.add_argument("--opt" , type=UpperCamelCase__ , default=UpperCamelCase__ )
expected.add_argument("--baz" , default="toto" , type=UpperCamelCase__ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=UpperCamelCase__ )
self.argparsersEqual(UpperCamelCase__ , UpperCamelCase__ )
    def test_parse_dict(self ):
        '''simple docstring'''
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        parsed_args = parser.parse_dict(args_dict )[0]
        args = BasicExample(**args_dict )
        self.assertEqual(parsed_args , args )
    def test_parse_dict_extra_key(self ):
        '''simple docstring'''
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }
        self.assertRaises(ValueError , parser.parse_dict , args_dict , allow_extra_keys=False )
    def test_parse_json(self ):
        '''simple docstring'''
        parser = HfArgumentParser(BasicExample )
        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , "temp_json" )
            os.mkdir(temp_local_path )
            with open(temp_local_path + ".json" , "w+" ) as f:
                json.dump(args_dict_for_json , f )
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json" ) )[0]
            args = BasicExample(**args_dict_for_json )
            self.assertEqual(parsed_args , args )
    def test_parse_yaml(self ):
        '''simple docstring'''
        parser = HfArgumentParser(BasicExample )
        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , "temp_yaml" )
            os.mkdir(temp_local_path )
            with open(temp_local_path + ".yaml" , "w+" ) as f:
                yaml.dump(args_dict_for_yaml , f )
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
            args = BasicExample(**args_dict_for_yaml )
            self.assertEqual(parsed_args , args )
    def test_integration_training_args(self ):
        '''simple docstring'''
        parser = HfArgumentParser(TrainingArguments )
        self.assertIsNotNone(parser )
| 623 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
    def _get_uniform_logits( self , batch_size , length ) -> Optional[Any]:
        scores = jnp.ones((batch_size, length) ) / length
        return scores
    def test_temperature_dist_warper( self ):
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2 , length=length )
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4 )  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores , axis=-1 )
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5 )
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3 )
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
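        # temperature divides the logits before softmax: T < 1 sharpens the
        # distribution while T > 1 flattens it, which the checks below verify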
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
    def test_top_k_dist_warper( self ):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3 )
        scores = top_k_warp(input_ids , ramp_logits , cur_len=None )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
        ramp_logits = np.broadcast_to(np.arange(length )[None, :] , (batch_size, length) ).copy()
        scores = top_k_warp_safety_check(input_ids , ramp_logits , cur_len=None )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def test_top_p_dist_warper( self ):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        filtered_dist = np.exp(top_p_warp(input_ids , dist , cur_len=None ) )
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
        self.assertTrue(np.allclose(filtered_dist , EXPECTED_FILTERED_DIST , atol=1e-3 ) )
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
        filtered_dist = top_p_warp(input_ids , ramp_logits , cur_len=None )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
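
# Note on the jitted test above: jax.jit traces both functions once and compiles
# them with XLA, so the processors must stay purely functional (no Python side
# effects that depend on traced values) for the eager and jitted paths to
# produce identical scores.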
| 311 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1_00 , )-> float:
"""simple docstring"""
UpperCamelCase = x_start
UpperCamelCase = fnc(UpperCAmelCase_ )
UpperCamelCase = 0.0
for _ in range(UpperCAmelCase_ ):
# Approximates curve as a sequence of linear lines and sums their length
UpperCamelCase = (x_end - x_start) / steps + xa
UpperCamelCase = fnc(UpperCAmelCase_ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
UpperCamelCase = xa
UpperCamelCase = fxa
return length
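
# As steps grows, the summed chord lengths converge to the arc-length integral
# L = integral of sqrt(1 + f'(x)^2) dx over [x_start, x_end]; math.hypot(dx, dy)
# returns sqrt(dx**2 + dy**2) for each chord without intermediate overflow.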
if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")

    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
| 556 |
"""simple docstring"""
SCREAMING_SNAKE_CASE = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
SCREAMING_SNAKE_CASE = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> float:
"""simple docstring"""
UpperCamelCase = from_type.lower().strip("s" )
UpperCamelCase = to_type.lower().strip("s" )
UpperCamelCase = UNIT_SYMBOL.get(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = UNIT_SYMBOL.get(UpperCAmelCase_ , UpperCAmelCase_ )
if from_sanitized not in METRIC_CONVERSION:
UpperCamelCase = (
F"Invalid 'from_type' value: {from_type!r}.\n"
F"Conversion abbreviations are: {', '.join(UpperCAmelCase_ )}"
)
raise ValueError(UpperCAmelCase_ )
if to_sanitized not in METRIC_CONVERSION:
UpperCamelCase = (
F"Invalid 'to_type' value: {to_type!r}.\n"
F"Conversion abbreviations are: {', '.join(UpperCAmelCase_ )}"
)
raise ValueError(UpperCAmelCase_ )
UpperCamelCase = METRIC_CONVERSION[from_sanitized]
UpperCamelCase = METRIC_CONVERSION[to_sanitized]
UpperCamelCase = 1
if from_exponent > to_exponent:
UpperCamelCase = from_exponent - to_exponent
else:
UpperCamelCase = -(to_exponent - from_exponent)
return value * pow(10 , UpperCAmelCase_ )
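
# Worked example: length_conversion(4, "kilometer", "megametre") uses exponents
# 3 (km) and 6 (Mm), so the factor is 10 ** (3 - 6) and the result is 0.004.
# Unit names are case-insensitive and a trailing "s" ("kilometers") is ignored.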
if __name__ == "__main__":
from doctest import testmod
testmod()
| 556 | 1 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowercase__ :Dict = logging.get_logger(__name__)
lowercase__ :Optional[int] = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class snake_case ( __UpperCAmelCase ):
'''simple docstring'''
_A : Optional[int] = 'bart'
_A : Optional[int] = ['past_key_values']
_A : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : int , __lowercase : str=50_265 , __lowercase : Dict=1_024 , __lowercase : int=12 , __lowercase : str=4_096 , __lowercase : str=16 , __lowercase : str=12 , __lowercase : Union[str, Any]=4_096 , __lowercase : Tuple=16 , __lowercase : int=0.0 , __lowercase : Tuple=0.0 , __lowercase : Any="gelu" , __lowercase : Optional[Any]=1_024 , __lowercase : Dict=0.1 , __lowercase : Any=0.0 , __lowercase : str=0.0 , __lowercase : str=0.0_2 , __lowercase : List[str]=0.0 , __lowercase : str=False , __lowercase : List[Any]=True , __lowercase : Tuple=3 , __lowercase : Optional[Any]=1 , __lowercase : str=0 , __lowercase : Tuple=2 , __lowercase : Any=True , __lowercase : Dict=2 , __lowercase : int=2 , **__lowercase : int , ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : Optional[Any] = encoder_ffn_dim
__UpperCAmelCase : Union[str, Any] = encoder_layers
__UpperCAmelCase : Optional[Any] = encoder_attention_heads
__UpperCAmelCase : Tuple = decoder_ffn_dim
__UpperCAmelCase : Tuple = decoder_layers
__UpperCAmelCase : str = decoder_attention_heads
__UpperCAmelCase : List[str] = dropout
__UpperCAmelCase : Union[str, Any] = attention_dropout
__UpperCAmelCase : int = activation_dropout
__UpperCAmelCase : Tuple = activation_function
__UpperCAmelCase : List[str] = init_std
__UpperCAmelCase : Dict = encoder_layerdrop
__UpperCAmelCase : int = decoder_layerdrop
__UpperCAmelCase : Union[str, Any] = classifier_dropout
__UpperCAmelCase : int = use_cache
__UpperCAmelCase : Tuple = encoder_layers
__UpperCAmelCase : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , is_encoder_decoder=__lowercase , decoder_start_token_id=__lowercase , forced_eos_token_id=__lowercase , **__lowercase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __lowercase ):
__UpperCAmelCase : Optional[int] = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'''The config can simply be saved and uploaded again to be fixed.''' )
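
# A minimal usage sketch (illustrative only, not part of this module): a small
# randomly initialized BART model can be built straight from this config, e.g.
#
#     from transformers import BartConfig, BartModel
#     config = BartConfig(encoder_layers=2, decoder_layers=2, d_model=128)
#     model = BartModel(config)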
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 522 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ :List[str] = logging.get_logger(__name__)
lowercase__ :List[str] = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class snake_case ( __UpperCAmelCase ):
'''simple docstring'''
_A : int = 'bridgetower_vision_model'
def __init__( self : str , __lowercase : Optional[Any]=768 , __lowercase : Tuple=12 , __lowercase : List[str]=3 , __lowercase : Any=16 , __lowercase : int=288 , __lowercase : List[Any]=1 , __lowercase : Any=1e-05 , __lowercase : int=False , __lowercase : Any=True , __lowercase : Any=False , **__lowercase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**__lowercase )
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : int = num_hidden_layers
__UpperCAmelCase : int = num_channels
__UpperCAmelCase : Optional[int] = patch_size
__UpperCAmelCase : List[str] = image_size
__UpperCAmelCase : Union[str, Any] = initializer_factor
__UpperCAmelCase : List[str] = layer_norm_eps
__UpperCAmelCase : Optional[int] = stop_gradient
__UpperCAmelCase : List[str] = share_layernorm
__UpperCAmelCase : int = remove_last_layer
@classmethod
def A_ ( cls : Optional[int] , __lowercase : Union[str, os.PathLike] , **__lowercase : Dict ):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(__lowercase , **__lowercase )
if config_dict.get('''model_type''' ) == "bridgetower":
__UpperCAmelCase : Optional[Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__lowercase , **__lowercase )
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # legacy kwargs kept for backward compatibility with older Hub configs
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
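
# Illustrative sketch (not part of this module): the composite config is usually
# assembled from the two sub-configs defined above, e.g.
#
#     text_cfg = BridgeTowerTextConfig(num_hidden_layers=2)
#     vision_cfg = BridgeTowerVisionConfig(num_hidden_layers=2)
#     cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)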
| 522 | 1 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase

import pytest
from absl.testing import parameterized

from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path


DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]


def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)

            builder_cls = import_main_class(dataset_module.module_path, dataset=True)

            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )

            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
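
# Note: as_streaming_dataset reads shards lazily over the network, so the test
# above validates the streaming path without materializing the full dataset
# locally (only the first example is actually pulled by next(iter(...))).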
| 331 |
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = mask_token

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
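
# Illustrative usage (not part of this module):
#
#     tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#     batch = tokenizer(["Summarize: the weather is nice"], return_tensors="np")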
| 331 | 1 |
from sklearn.metrics import recall_score

import datasets


_DESCRIPTION = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"

_KWARGS_DESCRIPTION = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n    - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {'recall': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {'recall': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {'recall': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {'recall': array([1., 0., 0.])}\n"

_CITATION = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        # sklearn expects (y_true, y_pred), i.e. references first
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 124 |
import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # The original attribute name on the next line was mangled in the source
    # dump; `test_cpu_offload` is an assumption based on similar pipeline suites.
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
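
# Design note: the 512px test swaps in DPMSolverMultistepScheduler, a
# higher-order solver that reaches comparable quality in fewer steps (25 here
# versus 40 DDIM steps in the 256px test above).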
| 124 | 1 |
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate


hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []

no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
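
# Design note: passing thread_ts to chat_postMessage makes each per-file failure
# table a threaded reply under the summary message instead of a separate post.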
| 707 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
def lowerCamelCase_ ( lowerCAmelCase: str )-> None:
# authorize twitter, initialize tweepy
_snake_case : Optional[Any] = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[Any] = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
_snake_case : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case : List[str] = api.user_timeline(screen_name=lowerCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
_snake_case : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case : Tuple = api.user_timeline(
screen_name=lowerCAmelCase , count=2_00 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
_snake_case : List[str] = alltweets[-1].id - 1
print(F"""...{len(lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case : int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
_snake_case : Any = csv.writer(lowerCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
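# A hedged alternative sketch: tweepy's Cursor handles the max_id pagination
# that the loop above does by hand. The helper name is illustrative and `api`
# is assumed to be an authorized tweepy.API instance as built above.
def get_all_tweets_cursor(api: "tweepy.API", screen_name: str) -> list:
    return [
        [tweet.id_str, tweet.created_at, tweet.text]
        for tweet in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=2_00).items()
    ]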
| 669 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
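# A simplified sketch of the lazy-import pattern wired up above: attribute
# access on the module triggers the real submodule import, so torch/TF code is
# only loaded when one of the listed names is actually used. The class below
# is illustrative; the real _LazyModule also handles submodules, __dir__ and
# module specs.
import importlib
import types
class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported name to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }
    def __getattr__(self, item):
        # import the owning submodule only on first access
        module = importlib.import_module("." + self._class_to_module[item], self.__name__)
        return getattr(module, item)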
| 490 |
'''simple docstring'''
import operator as op
def solve(post_fix ):
    """simple docstring"""
    stack = []
    div = lambda x , y : int(x / y )  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    } # operators & their respective operation
    # print table header
    print('Symbol'.center(8 ) , 'Action'.center(1_2 ) , 'Stack' , sep=' | ' )
    print('-' * (3_0 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit(): # if x in digit
            stack.append(x ) # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ('push(' + x + ')').ljust(1_2 ) , ','.join(stack ) , sep=' | ' )
        else:
            b = stack.pop() # pop stack
            # output in tabular format
            print(''.rjust(8 ) , ('pop(' + b + ')').ljust(1_2 ) , ','.join(stack ) , sep=' | ' )
            a = stack.pop() # pop stack
            # output in tabular format
            print(''.rjust(8 ) , ('pop(' + a + ')').ljust(1_2 ) , ','.join(stack ) , sep=' | ' )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ('push(' + a + x + b + ')').ljust(1_2 ) , ','.join(stack ) , sep=' | ' , )
    return int(stack[0] )
if __name__ == "__main__":
    Postfix = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
| 229 | 0 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 674 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : int ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case__ , )
assert hasattr(self , """env""" )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Optional[Any] ):
# configuration for running training on smdistributed Model Parallel
lowerCAmelCase__ = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowerCAmelCase__ = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
lowerCAmelCase__ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCAmelCase__ = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="""py36""" , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str ):
TrainingJobAnalytics(snake_case__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[str] ):
# create estimator
lowerCAmelCase__ = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case__ )
| 674 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def UpperCamelCase( self ):
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_UpperCAmelCase = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0 )
_UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''np''' )
_UpperCAmelCase = processor(images=_UpperCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase( self ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
_UpperCAmelCase = '''lower newer'''
_UpperCAmelCase = processor(text=_UpperCamelCase )
_UpperCAmelCase = tokenizer(_UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase( self ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
_UpperCAmelCase = '''lower newer'''
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=_UpperCamelCase , images=_UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_UpperCamelCase ):
processor()
def UpperCamelCase( self ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
_UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase = processor.batch_decode(_UpperCamelCase )
_UpperCAmelCase = tokenizer.batch_decode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
_UpperCAmelCase = '''lower newer'''
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=_UpperCamelCase , images=_UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
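# Minimal usage sketch mirroring the round trip these tests exercise (the
# tokenizer/image-processor pair is illustrative; any BERT-style tokenizer and
# CLIP-style image processor work):
#
#     processor = VisionTextDualEncoderProcessor(tokenizer=tok, image_processor=ip)
#     batch = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
#     sorted(batch.keys())  # attention_mask, input_ids, pixel_values, token_type_ids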
| 32 |
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
def solution( ):
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    words_file_path = os.path.join(script_dir , 'words.txt' )
    words = ''
    with open(words_file_path ) as f:
        words = f.readline()
    words = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
if __name__ == "__main__":
print(solution())
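# Worked example: "SKY" -> 19 + 11 + 25 = 55 = t(10), so it is counted; this is
# the classic Project Euler 42 triangle-word check, with TRIANGULAR_NUMBERS
# precomputed above for t(1)..t(100).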
| 91 | 0 |
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 668 |
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxBertModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxRobertaModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "bert-base is not a local folder and is not a valid model identifier" ):
lowercase__ = FlaxAutoModel.from_pretrained("bert-base" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase, revision="aaaaaa" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase, "Use `from_pt=True` to load this model" ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 668 | 1 |
from collections import defaultdict
def dfs( start )-> int:
    '''simple docstring'''
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree( )-> None:
    '''simple docstring'''
    dfs(1 )
if __name__ == "__main__":
    n , m = 1_0, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (1_0, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
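# For the edge list above the answer printed is 2: cutting edges (1, 3) and
# (1, 6) leaves components {3, 4}, {6, 8, 9, 10} and {1, 2, 5, 7}, all of even
# size. dfs() returns each subtree's size and records a cut whenever that size
# is even; the root's own "cut" is discarded by the final len(cuts) - 1.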
| 393 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 670 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class a ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''donut-swin'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , image_size=2_24 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
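# With the defaults above: hidden_size = embed_dim * 2 ** (len(depths) - 1)
#                                      = 96 * 2 ** 3 = 768,
# i.e. the channel width after the last Swin stage, which is what
# VisionEncoderDecoderModel reads when pairing this encoder with a decoder.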
| 714 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class a ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''speech_to_text'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=1_00_00 , encoder_layers=12 , encoder_ffn_dim=20_48 , encoder_attention_heads=4 , decoder_layers=6 , decoder_ffn_dim=20_48 , decoder_attention_heads=4 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_source_positions=60_00 , max_target_positions=10_24 , num_conv_layers=2 , conv_kernel_sizes=(5, 5) , conv_channels=10_24 , input_feat_per_channel=80 , input_channels=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes )
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        if len(self.conv_kernel_sizes ) != self.num_conv_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
                F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
                F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
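# Example of the validation in __init__ above: the default conv_kernel_sizes=(5, 5)
# has length 2 and matches num_conv_layers=2; constructing the config with, say,
# conv_kernel_sizes=(5,) while keeping num_conv_layers=2 raises the ValueError.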
| 83 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class _a ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''deformable_detr'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
    def __init__( self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1_024, encoder_layers=6, encoder_ffn_dim=1_024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1_024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs, ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config, dict ):
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.' )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )
@property
    def num_attention_heads( self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
    def hidden_size( self ):
'''simple docstring'''
return self.d_model
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
return output
| 28 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_snake_case = 'pt'
elif is_tf_available():
_snake_case = 'tf'
else:
_snake_case = 'jax'
class lowerCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
super().setUp()
        tokenizer = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def perceiver_tokenizer( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
    def get_tokenizer( self , **_SCREAMING_SNAKE_CASE ) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
    def __lowercase( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(r'^[ a-zA-Z]+$' , t[1] ) , toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
def __lowercase( self ) -> Optional[int]:
__UpperCamelCase = self.perceiver_tokenizer
__UpperCamelCase = 'Unicode €.'
__UpperCamelCase = tokenizer(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , _SCREAMING_SNAKE_CASE )
# decoding
__UpperCamelCase = tokenizer.decode(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , '[CLS]Unicode €.[SEP]' )
__UpperCamelCase = tokenizer('e è é ê ë' )
__UpperCamelCase = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _SCREAMING_SNAKE_CASE )
# decoding
__UpperCamelCase = tokenizer.decode(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def __lowercase( self ) -> Any:
__UpperCamelCase = self.perceiver_tokenizer
__UpperCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__UpperCamelCase = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__UpperCamelCase = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if FRAMEWORK != "jax":
__UpperCamelCase = list(batch.input_ids.numpy()[0] )
else:
__UpperCamelCase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def __lowercase( self ) -> Optional[int]:
__UpperCamelCase = self.perceiver_tokenizer
__UpperCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__UpperCamelCase = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _SCREAMING_SNAKE_CASE )
self.assertIn('attention_mask' , _SCREAMING_SNAKE_CASE )
self.assertNotIn('decoder_input_ids' , _SCREAMING_SNAKE_CASE )
self.assertNotIn('decoder_attention_mask' , _SCREAMING_SNAKE_CASE )
def __lowercase( self ) -> Tuple:
__UpperCamelCase = self.perceiver_tokenizer
__UpperCamelCase = [
'Summary of the text.',
'Another summary.',
]
__UpperCamelCase = tokenizer(
text_target=_SCREAMING_SNAKE_CASE , max_length=32 , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __lowercase( self ) -> Tuple:
# safety check on max_len default value so we are sure the test works
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCamelCase = tempfile.mkdtemp()
__UpperCamelCase = ' He is very happy, UNwant\u00E9d,running'
__UpperCamelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = tokenizer.__class__.from_pretrained(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = after_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
shutil.rmtree(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCamelCase = tempfile.mkdtemp()
__UpperCamelCase = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__UpperCamelCase = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__UpperCamelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = tokenizer.__class__.from_pretrained(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = after_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__UpperCamelCase = tokenizer.__class__.from_pretrained(_SCREAMING_SNAKE_CASE , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_SCREAMING_SNAKE_CASE )
def __lowercase( self ) -> Tuple:
__UpperCamelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
                    special_tokens_map = json.load(json_file )
                with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
                    tokenizer_config = json.load(json_file )
                added_tokens_extra_ids = [f"""<extra_id_{i}>""" for i in range(125 )]
                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(special_tokens_map , outfile )
                with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(tokenizer_config , outfile )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir , )
                self.assertIn(
                    'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=True )]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir , additional_special_tokens=new_added_tokens , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __lowercase( self ) -> Tuple:
__UpperCamelCase = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def __lowercase( self ) -> int:
pass
def __lowercase( self ) -> Dict:
pass
def __lowercase( self ) -> Dict:
pass
def __lowercase( self ) -> Optional[Any]:
pass
def __lowercase( self ) -> Union[str, Any]:
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__UpperCamelCase = self.get_tokenizers(fast=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__UpperCamelCase = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__UpperCamelCase = tokenizer.convert_tokens_to_string(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
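# A note on the id scheme the expected vectors above encode: the Perceiver
# tokenizer maps raw UTF-8 bytes to ids with a fixed offset that reserves low
# ids for special tokens ([CLS] = 4, [SEP] = 5 in the test sequences), so for
# example ord("U") + 6 == 91, matching the first byte of "Unicode €.".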
| 383 | 0 |
'''simple docstring'''
import os
def solution( ):
    '''simple docstring'''
    with open(os.path.dirname(__file__ ) + """/p022_names.txt""" ) as file:
        names = str(file.readlines()[0] )
        names = names.replace("""\"""" , """""" ).split(""",""" )
    names.sort()
    total_score = 0
    name_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
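# Worked example from the Project Euler 22 statement: "COLIN" scores
# 3 + 15 + 12 + 9 + 14 = 53 and sits at position 938 in the sorted list,
# contributing 938 * 53 = 49714 to the total.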
| 398 |
'''simple docstring'''
def sylvester( number : int ):
    '''simple docstring'''
    assert isinstance(number , int ), f'The input value of [n={number}] is not an integer'
    if number == 1:
        return 2
    elif number < 1:
        msg = f'The input value of [n={number}] has to be > 0'
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
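# The sequence runs 2, 3, 7, 43, 1807, ...: each call computes
# a(n) = a(n-1) * (a(n-1) - 1) + 1, e.g. sylvester(5) = 43 * 42 + 1 = 1807.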
| 398 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __a (PipelineTool):
    '''simple docstring'''
    default_checkpoint = """openai/whisper-base"""
    description = (
        """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
        """transcribed text."""
    )
    name = """transcriber"""
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["""audio"""]
    outputs = ["""text"""]
    def encode( self , _a ) -> int:
"""simple docstring"""
return self.pre_processor(_a , return_tensors="""pt""" ).input_features
    def forward( self , _a ) -> Optional[int]:
"""simple docstring"""
return self.model.generate(inputs=_a )
    def decode( self , _a ) -> Any:
"""simple docstring"""
return self.pre_processor.batch_decode(_a , skip_special_tokens=_a )[0]
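# Hedged usage sketch: PipelineTool subclasses are callable and chain
# encode -> forward -> decode, so the tool above transcribes in one call.
# `waveform` is assumed to be a raw audio array at the sampling rate the
# Whisper processor expects; the variable names are illustrative.
#
#     tool = __a()            # the (anonymized) tool class defined above
#     text = tool(waveform)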
| 680 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class __a (BaseOutput):
    '''simple docstring'''
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class __a (SchedulerMixin , ConfigMixin):
    '''simple docstring'''
    order = 1
@register_to_config
    def __init__( self , num_train_timesteps = 1_000 , beta_start = 0.0_001 , beta_end = 0.02 , beta_schedule = "linear" , trained_betas = None , clip_sample = True , set_alpha_to_zero = True , steps_offset = 0 , prediction_type = "epsilon" , clip_sample_range = 1.0 , **kwargs , ):
        """simple docstring"""
        if kwargs.get("""set_alpha_to_one""" , None ) is not None:
            deprecation_message = (
                """The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."""
            )
            deprecate("""set_alpha_to_one""" , """1.0.0""" , deprecation_message , standard_warn=False )
            set_alpha_to_zero = kwargs["""set_alpha_to_one"""]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps ).copy().astype(np.int64 ) )
    def scale_model_input( self , sample , timestep = None ) -> torch.FloatTensor:
        """simple docstring"""
        return sample
    def set_timesteps( self , num_inference_steps , device = None ) -> None:
        """simple docstring"""
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
                f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
                f''' maximal {self.config.num_train_timesteps} timesteps.''' )
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round().copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        self.timesteps += self.config.steps_offset
    def step( self , model_output , timestep , sample , eta = 0.0 , use_clipped_model_output = False , variance_noise = None , return_dict = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
        """simple docstring"""
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
                """ `v_prediction`""" )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
    def __len__( self ) -> int:
"""simple docstring"""
return self.config.num_train_timesteps
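# Example of the schedule above: with num_train_timesteps=1000 and
# set_timesteps(num_inference_steps=4), step_ratio is 250 and the timesteps
# become [0, 250, 500, 750] (plus steps_offset); step() then indexes one step
# *forward* (timestep + 1000 // 4), matching the inverted-DDIM notes in the
# comments.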
| 680 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 710 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
_UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 108 | 0 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode :
    def __init__( self , data ):
        self.data = data
        self.left = None
        self.right = None
def build_tree ( ) -> TreeNode:
    '''simple docstring'''
    print('''\n********Press N to stop entering at any point of time********\n''' )
    check = input('''Enter the value of the root node: ''' ).strip().lower()
    q = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = F"Enter the left node of {node_found.data}: "
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = F"Enter the right node of {node_found.data}: "
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise
def pre_order ( node ) -> None:
    '''simple docstring'''
    if not isinstance(node, TreeNode ) or not node:
        return
    print(node.data, end=''',''' )
    pre_order(node.left )
    pre_order(node.right )
def in_order ( node ) -> None:
    '''simple docstring'''
    if not isinstance(node, TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data, end=''',''' )
    in_order(node.right )
def post_order ( node ) -> None:
    '''simple docstring'''
    if not isinstance(node, TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data, end=''',''' )
def __magic_name__ ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if not isinstance(__a, __a ) or not node:
return
snake_case_ = queue.Queue()
q.put(__a )
while not q.empty():
snake_case_ = q.get()
print(node_dequeued.data, end=''',''' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if not isinstance(__a, __a ) or not node:
return
snake_case_ = queue.Queue()
q.put(__a )
while not q.empty():
snake_case_ = []
while not q.empty():
snake_case_ = q.get()
print(node_dequeued.data, end=''',''' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__a )
def __magic_name__ ( __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if not isinstance(__a, __a ) or not node:
return
snake_case_ = []
snake_case_ = node
while n or stack:
while n: # start from root node, find its left child
print(n.data, end=''',''' )
stack.append(__a )
snake_case_ = n.left
# end of while means current node doesn't have left child
snake_case_ = stack.pop()
# start to traverse its right child
snake_case_ = n.right
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if not isinstance(__a, __a ) or not node:
return
snake_case_ = []
snake_case_ = node
while n or stack:
while n:
stack.append(__a )
snake_case_ = n.left
snake_case_ = stack.pop()
print(n.data, end=''',''' )
snake_case_ = n.right
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if not isinstance(__a, __a ) or not node:
return
snake_case_ = [], []
snake_case_ = node
stacka.append(__a )
while stacka: # to find the reversed order of post order, store it in stack2
snake_case_ = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__a )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data, end=''',''' )
def __magic_name__ ( __UpperCAmelCase = "", __UpperCAmelCase=50, __UpperCAmelCase="*" ) -> Optional[Any]:
'''simple docstring'''
if not s:
return "\n" + width * char
snake_case_ = divmod(width - len(__a ) - 2, 2 )
return F"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
    node = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
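# Worked example (added for reference): for the tree built from
#         1
#       /   \
#      2     3
#     / \
#    4   5
# the traversals above print:
#   pre_order:   1,2,4,5,3,
#   in_order:    4,2,5,1,3,
#   post_order:  4,5,2,3,1,
#   level_order: 1,2,3,4,5,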
| 640 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identity(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
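# Sketch (added, not from the original test file): the dummy components above can also
# drive the pipeline outside the mixin harness, roughly:
#   components = <a TextToVideoSDPipelineFastTests instance>.get_dummy_components()
#   pipe = TextToVideoSDPipeline(**components).to("cpu")
#   frames = pipe("a prompt", num_inference_steps=2, output_type="np").frames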
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
| 437 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 246 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
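# Note (added): the integration check above is gated by @slow; under the standard
# transformers test layout it would be run with something like
#   RUN_SLOW=1 pytest tests/models/mbart/test_modeling_tf_mbart.py -k batch_generation
# (the path is given for illustration only).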
| 246 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 83 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """
    Finds where `function` becomes 0 on [a, b] using the bisection method
    (Bolzano's theorem guarantees a root when the signs at a and b differ).
    """
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
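# Worked check (added sketch, not part of the original file): f(2) = 8 - 4 - 5 = -1 and
# f(3) = 27 - 6 - 5 = 16, so f changes sign on [2, 3] and bisection converges to the
# real root x ~ 2.0945515 of x**3 - 2x - 5 = 0.
if __name__ == "__main__":
    assert abs(f(bisection(f, 2, 3))) < 1e-5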
| 290 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")
    sagemaker_trust_policy = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
],
}
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2), )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?", ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "], int, )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`")
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region
    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?", ["Provide IAM Role name", "Create new IAM role using credentials"], int, )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)
    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())
    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ", lambda x: str(x).lower(), )
    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ", lambda x: str(x).lower(), )
    distributed_type = _ask_options(
        "What is the distributed mode?", ["No distributed training", "Data parallelism"], _convert_sagemaker_distributed_mode, )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?", TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default="default", )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ", int, default=1, )
    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?", ["no", "fp16", "bf16", "fp8"], _convert_mixed_precision, )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.")
    return SageMakerConfig(
        image_uri=docker_image, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=distributed_type, use_cpu=False, dynamo_config=dynamo_config, ec2_instance_type=ec2_instance_type, profile=aws_profile, region=aws_region, iam_role_name=iam_role_name, mixed_precision=mixed_precision, num_machines=num_machines, sagemaker_inputs_file=sagemaker_inputs_file, sagemaker_metrics_file=sagemaker_metrics_file, )
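# Illustrative result (added; all field values are hypothetical): the returned
# SageMakerConfig serializes to an accelerate config along the lines of
#   compute_environment: AMAZON_SAGEMAKER
#   distributed_type: DATA_PARALLEL
#   ec2_instance_type: ml.p3dn.24xlarge
#   iam_role_name: accelerate_sagemaker_execution_role
#   mixed_precision: fp16
#   num_machines: 2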
| 716 |
"""simple docstring"""
def gnome_sort(lst: list) -> list:
    """
    Pure implementation of the gnome sort algorithm in Python.
    """
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
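# Quick self-check (added example; runs alongside the interactive entry point above):
if __name__ == "__main__":
    assert gnome_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
    assert gnome_sort([]) == []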
| 500 | 0 |
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """
    Returns 1 if `number` has an even count of prime factors (with multiplicity),
    and -1 if the count is odd.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
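# Worked examples (added for illustration): 10 = 2 * 5 has two prime factors counted
# with multiplicity, so the result is (-1)**2 = 1, while 11 is prime with one factor:
if __name__ == "__main__":
    assert liouville_lambda(10) == 1
    assert liouville_lambda(11) == -1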
| 544 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
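# For reference (added; example markup only): the tag this scraper reads typically looks like
#   <meta property="og:image" content="https://example.com/picture.jpg" />
# so soup.find("meta", {"property": "og:image"})["content"] yields the image URL.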
| 544 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
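# Usage sketch (added; assumes transformers' TimmBackbone wiring around this config):
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#   # a TimmBackbone built from this config would expose those stages as feature maps.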
| 721 |
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """
    Returns True if it is safe to place a queen at (row, column) given the
    current state of the board.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """
    Backtracking solver: tries every column in the current row, recursing to the
    next row whenever a placement is safe.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Prints a board with queens marked as Q and empty squares as dots."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
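# Sanity note (added): for n = 8 the solver above finds all 92 distinct placements,
# so the final line prints: The total no. of solutions are : 92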
| 60 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected (height, width) after resizing, mirroring the
        shortest-edge logic of the image processor.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
_lowerCAmelCase = json.loads(f.read() )
_lowerCAmelCase = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
_lowerCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_lowerCAmelCase = ConditionalDetrImageProcessor(format='coco_panoptic' )
_lowerCAmelCase = image_processing(images=__magic_name__ , annotations=__magic_name__ , masks_path=__magic_name__ , return_tensors='pt' )
# verify pixel values
_lowerCAmelCase = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , __magic_name__ )
_lowerCAmelCase = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __magic_name__ , atol=1e-4 ) )
# verify area
_lowerCAmelCase = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __magic_name__ ) )
# verify boxes
_lowerCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __magic_name__ )
_lowerCAmelCase = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __magic_name__ , atol=1e-3 ) )
# verify image_id
_lowerCAmelCase = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __magic_name__ ) )
# verify is_crowd
_lowerCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __magic_name__ ) )
# verify class_labels
_lowerCAmelCase = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __magic_name__ ) )
# verify masks
_lowerCAmelCase = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __magic_name__ )
# verify orig_size
_lowerCAmelCase = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __magic_name__ ) )
# verify size
_lowerCAmelCase = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __magic_name__ ) )
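# Worked resize arithmetic (added): the 640x480 COCO sample with shortest_edge=800 is
# scaled by 800/480, giving 800 x int(640 * 800 / 480) = 800 x 1066 -- which matches the
# torch.Size([1, 3, 800, 1066]) asserted in both slow tests above.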
| 589 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serializes this instance to a dict, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
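# Usage sketch (added; num_labels is an illustrative kwarg handled by PretrainedConfig):
#   backbone = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
#   config = UperNetConfig(backbone_config=backbone, num_labels=150)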
| 183 | 0 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( ) -> List[Any]:
UpperCAmelCase : Dict = 1_0
UpperCAmelCase : Union[str, Any] = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
UpperCAmelCase : List[Any] = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(_lowercase ) ),
} , features=_lowercase , )
return dataset
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : Dict = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=_lowercase )
return filename
# FILE_CONTENT + files
a : int = """\
Text data.
Second line of data."""
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
UpperCAmelCase : int = FILE_CONTENT
with open(_lowercase , """w""" ) as f:
f.write(_lowercase )
return filename
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> List[str]:
import bza
UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
UpperCAmelCase : Union[str, Any] = bytes(_lowercase , """utf-8""" )
with bza.open(_lowercase , """wb""" ) as f:
f.write(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> int:
import gzip
UpperCAmelCase : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
UpperCAmelCase : Optional[int] = bytes(_lowercase , """utf-8""" )
with gzip.open(_lowercase , """wb""" ) as f:
f.write(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
UpperCAmelCase : Optional[Any] = bytes(_lowercase , """utf-8""" )
with lza.frame.open(_lowercase , """wb""" ) as f:
f.write(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
UpperCAmelCase : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(_lowercase , """w""" ) as archive:
archive.write(_lowercase , arcname=os.path.basename(_lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
import tarfile
UpperCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(_lowercase , """w""" ) as f:
f.add(_lowercase , arcname=os.path.basename(_lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> List[str]:
import lzma
UpperCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
UpperCAmelCase : int = bytes(_lowercase , """utf-8""" )
with lzma.open(_lowercase , """wb""" ) as f:
f.write(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[int]:
import zipfile
UpperCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(_lowercase , """w""" ) as f:
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> List[str]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
UpperCAmelCase : Union[str, Any] = bytes(_lowercase , """utf-8""" )
with zstd.open(_lowercase , """wb""" ) as f:
f.write(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Union[str, Any]:
UpperCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
UpperCAmelCase : Any = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(_lowercase , """w""" ) as f:
f.write(_lowercase )
return filename
a : Any = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
a : Dict = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
a : Optional[int] = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
a : int = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
a : Optional[Any] = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( ) -> Any:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = datasets.Dataset.from_dict(_lowercase )
UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Tuple:
UpperCAmelCase : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(_lowercase ) ) as con:
UpperCAmelCase : str = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(_lowercase , """w""" , newline="""""" ) as f:
UpperCAmelCase : Optional[Any] = csv.DictWriter(_lowercase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(_lowercase , """w""" , newline="""""" ) as f:
UpperCAmelCase : Tuple = csv.DictWriter(_lowercase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]:
import bza
UpperCAmelCase : int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(_lowercase , """rb""" ) as f:
UpperCAmelCase : List[str] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(_lowercase , """wb""" ) as f:
f.write(_lowercase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(_lowercase , """w""" ) as f:
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
f.write(_lowercase , arcname=os.path.basename(_lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(_lowercase , """w""" ) as f:
f.write(_lowercase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(_lowercase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
UpperCAmelCase : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(_lowercase , """w""" ) as f:
f.write(_lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(_lowercase ) ) )
f.write(_lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(_lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
UpperCAmelCase : Union[str, Any] = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(_lowercase , """wb""" ) as f:
UpperCAmelCase : int = pq.ParquetWriter(_lowercase , schema=_lowercase )
UpperCAmelCase : int = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_lowercase ) )] for k in DATA[0]} , schema=_lowercase )
writer.write_table(_lowercase )
writer.close()
return path
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text_path_with_unsupported_extension(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")
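

# The image/audio fixtures above point at small binary assets checked into
# tests/features/data rather than generating files on the fly.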
@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
| 672 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 672 | 1 |
'''simple docstring'''
def partition(m: int) -> int:
    """Count the integer partitions of m with bottom-up dynamic programming."""
    # memo[n][k] counts the partitions of n into parts of size at most k + 1
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
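

# Example: partition(5) == 7, since 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and
# 1+1+1+1+1 are the seven partitions of 5. The table costs O(m^2) time and space.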
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__SCREAMING_SNAKE_CASE : Dict =int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__SCREAMING_SNAKE_CASE : Any =int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 135 |
"""simple docstring"""
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        # For common tests
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_attention_heads=5,
        num_hidden_layers=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,  # t5 forces 100 extra tokens
            d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
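
    # The next check exercises the decoder cache: one pass with use_cache=True,
    # then a single-token pass fed the returned past_key_values, comparing a
    # random slice of the hidden states against the no-cache baseline.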
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
A_ = UMTaModel(config_and_inputs[0] ).to(_snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_snake_case , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=_snake_case , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 115 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
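
    # The fast test below runs the tiny pipeline for two denoising steps on CPU
    # and compares a 3x3 corner slice of the generated image to reference values.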
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 705 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")
    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}
    def cat(self, path: str):
        return self.file.open().read()
    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
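

# Each concrete filesystem below only pins the fsspec protocol name, the
# compression codec, and the filename extension to strip; all of the reading
# logic lives in BaseCompressedFileFileSystem.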
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
| 283 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
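
    # NLTK's corpus_gleu aggregates matching/total n-gram counts over the whole
    # corpus before taking min(precision, recall), rather than averaging
    # per-sentence GLEU scores.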
    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 39 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
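

# Rough usage sketch (hypothetical snippet, assuming the transformers tools
# runtime and vision extras are installed):
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   caption = tool(Image.open("photo.jpg"))  # runs encode -> forward -> decode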
| 98 | 0 |
from ..utils import DummyObject, requires_backends
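

# Dummy-object pattern: this placeholder is importable even when its backends
# are missing, but instantiating it (or calling from_config/from_pretrained)
# raises an ImportError telling the user to install torch and scipy.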
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 700 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value):
        # the mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
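
    # Resulting special-token layout (BART-style, which LED inherits):
    # single sequence: `<s> A </s>`; pair of sequences: `<s> A </s></s> B </s>`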
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 673 | 0 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve defined by a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Evaluate the Bernstein basis b_{i,n}(t) = C(n, i) * (1 - t)^(n - i) * t^i for all i."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Return the (x, y) point on the curve at parameter t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plot the curve together with its control points (requires matplotlib)."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 634 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
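
    # The toy vocab/merges above use the GPT-2 byte-level BPE file format;
    # "\u0120" is the byte-level marker for a leading space.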
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self ):
        """simple docstring"""
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
            sequences = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            encoding = tokenizer(sequences , padding=True )
            decoded_sequences = [tokenizer.decode(seq , skip_special_tokens=True ) for seq in encoding['''input_ids''']]
# fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data , expected_encoding )
            for expected, decoded in zip(expected_decoded_sequence , decoded_sequences ):
                self.assertEqual(expected , decoded )
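# Added usage sketch (assumption: Hugging Face Hub access; not part of the original
# test file). It mirrors what test_token_type_ids above verifies:
#
#     from transformers import DebertaTokenizer
#     tok = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
#     print(tok("Hello", "World")["token_type_ids"])  # 0s for segment A, 1s for segment B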
| 634 | 1 |
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling: int = 1_00_00_00) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
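    # Added sanity check (not in the original solution): below a ceiling of 100,
    # the longest run of consecutive primes summing to a prime is
    # 2 + 3 + 5 + 7 + 11 + 13 = 41.
    assert solution(100) == 41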
| 706 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests ( OnnxPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    hub_checkpoint = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
    def get_dummy_inputs(self , seed=0 ):
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''strength''': 0.75,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_pipeline_default_ddim(self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1
    def test_pipeline_pndm(self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_lms(self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler(self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler_ancestral(self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_dpm_multistep(self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests ( unittest.TestCase ):
    """simple docstring"""
    @property
    def gpu_provider(self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
    @property
    def gpu_options(self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        init_image = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def test_inference_k_lms(self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        init_image = init_image.resize((768, 512) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
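# Added note (assumption, not part of the original file): the fast tests above run
# on CPU via onnxruntime, while the nightly class needs a CUDA-enabled
# onnxruntime-gpu build. A minimal smoke run of the fast tests could look like
#
#     python -m pytest -k "OnnxStableDiffusionImg2ImgPipelineFastTests" -x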
| 93 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self ):
        super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_input_output_texts(self , tokenizer ):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text
    def get_clean_sequence(self , tokenizer ):
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
    def test_pretokenized_inputs(self ):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input(self ):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input(self ):
        pass  # TODO add if relevant
    def test_full_tokenizer(self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
        self.assertListEqual(tokens , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
    def test_pickle_mecab_tokenizer(self ):
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
        self.assertIsNotNone(tokenizer )
        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        filename = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(filename , "wb" ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , "rb" ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
    def test_mecab_tokenizer_ipadic(self ):
        tokenizer = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
    def test_mecab_tokenizer_unidic_lite(self ):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
    def test_mecab_tokenizer_unidic(self ):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
    def test_mecab_tokenizer_lowercase(self ):
        tokenizer = MecabTokenizer(do_lower_case=True , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
    def test_mecab_tokenizer_with_option(self ):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True , normalize_text=False , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
    def test_mecab_tokenizer_no_normalize(self ):
        tokenizer = MecabTokenizer(normalize_text=False , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
    def test_pickle_sudachi_tokenizer(self ):
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
        self.assertIsNotNone(tokenizer )
        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        filename = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(filename , "wb" ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , "rb" ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
@require_sudachi
    def test_sudachi_tokenizer_core(self ):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self ):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self ):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self ):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
    def test_sudachi_tokenizer_lowercase(self ):
        tokenizer = SudachiTokenizer(do_lower_case=True , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
    def test_sudachi_tokenizer_no_normalize(self ):
        tokenizer = SudachiTokenizer(normalize_text=False , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self ):
        tokenizer = SudachiTokenizer(trim_whitespace=True , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
    def test_pickle_jumanpp_tokenizer(self ):
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
        self.assertIsNotNone(tokenizer )
        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        filename = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(filename , "wb" ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , "rb" ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
@require_jumanpp
    def test_jumanpp_tokenizer(self ):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
    def test_jumanpp_tokenizer_lowercase(self ):
        tokenizer = JumanppTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self ):
        tokenizer = JumanppTokenizer(normalize_text=False )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self ):
        tokenizer = JumanppTokenizer(trim_whitespace=True )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
    def test_jumanpp_tokenizer_ext(self ):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
    def test_wordpiece_tokenizer(self ):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
    def test_sentencepiece_tokenizer(self ):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
        subword_tokenizer = tokenizer.subword_tokenizer
        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
        self.assertListEqual(tokens , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
        self.assertListEqual(tokens , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
    def test_sequence_builders(self ):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
        text = tokenizer.encode("ありがとう。" , add_special_tokens=False )
        text_a = tokenizer.encode("どういたしまして。" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp(self ):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer(self , **kwargs ):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
    def test_pretokenized_inputs(self ):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input(self ):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input(self ):
        pass  # TODO add if relevant
    def test_full_tokenizer(self ):
        tokenizer = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
        self.assertListEqual(
            tokens , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
    def test_character_tokenizer(self ):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
    def test_sequence_builders(self ):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
        text = tokenizer.encode("ありがとう。" , add_special_tokens=False )
        text_a = tokenizer.encode("どういたしまして。" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest ( unittest.TestCase ):
    '''simple docstring'''
    def test_tokenizer_bert_japanese(self ):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID )
        self.assertIsInstance(tokenizer , BertJapaneseTokenizer )
class BertTokenizerMismatchTest ( unittest.TestCase ):
    '''simple docstring'''
    def test_tokenizer_mismatch_warning(self ):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers" , level="WARNING" ) as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
SCREAMING_SNAKE_CASE = "bert-base-cased"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertJapaneseTokenizer.from_pretrained(__lowerCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
| 16 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
_lowerCAmelCase = ["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""]
_lowerCAmelCase = {"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = """ Hello world! cécé herlolip"""
mnli_rename_keys = [
("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def remove_ignore_keys_(state_dict ):
    '''simple docstring'''
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path ):
    '''simple docstring'''
    sd = torch.load(checkpoint_path , map_location='cpu' )
    hub_interface = torch.hub.load('pytorch/fairseq' , 'bart.large.cnn' ).eval()
    hub_interface.model.load_state_dict(sd['model'] )
    return hub_interface
def make_linear_from_emb(emb ):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path , pytorch_dump_folder_path , hf_checkpoint_name=None ):
    '''simple docstring'''
    if not os.path.exists(checkpoint_path ):
        bart = torch.hub.load('pytorch/fairseq' , checkpoint_path ).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path )
    bart.model.upgrade_state_dict(bart.model.state_dict() )
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace('.' , '-' )
    config = BartConfig.from_pretrained(hf_checkpoint_name )
    tokens = bart.encode(SAMPLE_TEXT ).unsqueeze(0 )
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name ).encode(SAMPLE_TEXT , return_tensors='pt' ).unsqueeze(0 )
    if not torch.eq(tokens , tokensa ).all():
        raise ValueError(
            f"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}""" )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict['model.shared.weight'] = state_dict['model.decoder.embed_tokens.weight']
        for src, dest in mnli_rename_keys:
            rename_key(state_dict , src , dest )
        model = BartForSequenceClassification(config ).eval()
        model.load_state_dict(state_dict )
        fairseq_output = bart.predict('mnli' , tokens , return_logits=True )
        new_model_outputs = model(tokens )[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
        fairseq_output = bart.extract_features(tokens )
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config ).eval()
            model.load_state_dict(state_dict )
            new_model_outputs = model(tokens ).model[0]
        else:
            model = BartForConditionalGeneration(config ).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict )
            if hasattr(model , 'lm_head' ):
                model.lm_head = make_linear_from_emb(model.model.shared )
            new_model_outputs = model.model(tokens )[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""" )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError('Some values in `fairseq_output` are different from `new_model_outputs`' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
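# Added example invocation (hypothetical script name and paths, not from the
# original file):
#
#     python convert_bart_checkpoint.py bart.large ./bart-large-dump \
#         --hf_config facebook/bart-large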
| 259 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class TableTransformerConfig( PretrainedConfig ):
    model_type = """table-transformer"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ) -> Any:
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads(self ) -> int:
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size(self ) -> int:
        """simple docstring"""
        return self.d_model
class TableTransformerOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ] )
    @property
    def atol_for_validation(self ) -> float:
        """simple docstring"""
        return 1E-5
    @property
    def default_onnx_opset(self ) -> int:
        """simple docstring"""
        return 12
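# Added usage sketch (assumption, not part of the original module): the attribute_map
# lets the generic names resolve to the DETR-style ones, e.g.
#
#     config = TableTransformerConfig()
#     assert config.hidden_size == config.d_model == 256
#     assert config.num_attention_heads == config.encoder_attention_heads == 8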
| 720 |
'''simple docstring'''
def gray_code(bit_count: int ) -> list:
    """simple docstring"""
    if bit_count < 0:
        raise ValueError("""The given input must be positive""" )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    #
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string(bit_count: int ) -> list:
    """simple docstring"""
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = """0""" + smaller_sequence[i]
        sequence.append(generated_no )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = """1""" + smaller_sequence[i]
        sequence.append(generated_no )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
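    # Added sanity check (not in the original file): the 3-bit reflected Gray
    # code visits all eight values and successive entries differ by one bit.
    assert gray_code(3) == [0, 1, 3, 2, 6, 7, 5, 4]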
| 163 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name ):
    config = SwinConfig(image_size=192 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants" )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name ):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token" , "embeddings.mask_token" )
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm" , "embeddings.norm" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if name == "encoder.norm.weight":
        name = '''layernorm.weight'''
    if name == "encoder.norm.bias":
        name = '''layernorm.bias'''
    if "decoder" in name:
        pass
    else:
        name = '''swin.''' + name
    return name
def convert_state_dict(orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[
                    :dim
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swin_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub ):
    state_dict = torch.load(checkpoint_path , map_location="cpu" )['''model''']
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors="pt" )
    with torch.no_grad():
        outputs = model(**inputs ).logits
    print(outputs.keys() )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'Saving image processor to {pytorch_dump_folder_path}' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'Pushing model and image processor for {model_name} to hub' )
        model.push_to_hub(F'microsoft/{model_name}' )
        image_processor.push_to_hub(F'microsoft/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
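# Added example invocation (hypothetical script name and local path, not from the
# original file):
#
#     python convert_swin_simmim.py \
#         --model_name swin-base-simmim-window6-192 \
#         --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#         --pytorch_dump_folder_path ./swin-simmim-converted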
| 323 |
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph , s , t , parent ):
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph , source , sink ):
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph , source , sink , parent ):
        path_flow = float('''Inf''' )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
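    # Note (added, not in the original file): mincut() consumes the capacity matrix
    # in place, so a rerun needs a fresh copy, e.g. [row[:] for row in test_graph].
    # For this classic network the expected cut is [(1, 3), (4, 3), (4, 5)],
    # whose capacities 12 + 7 + 4 equal the maximum flow of 23.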
| 678 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url ):
    """simple docstring"""
    config = DPTConfig(embedding_type="""hybrid""" )
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = """project"""
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = """huggingface/label-files"""
        filename = """ade20k-id2label.json"""
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="""dataset""" ) ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict ):
    """simple docstring"""
    ignore_keys = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(name ):
    """simple docstring"""
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("""pretrained.model""" , """dpt.encoder""" )
    if "pretrained.model" in name:
        name = name.replace("""pretrained.model""" , """dpt.embeddings""" )
    if "patch_embed" in name:
        name = name.replace("""patch_embed""" , """""" )
    if "pos_embed" in name:
        name = name.replace("""pos_embed""" , """position_embeddings""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "proj" in name and "project" not in name:
        name = name.replace("""proj""" , """projection""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """layer""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "norm1" in name and "backbone" not in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name and "backbone" not in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "scratch.output_conv" in name:
        name = name.replace("""scratch.output_conv""" , """head""" )
    if "scratch" in name:
        name = name.replace("""scratch""" , """neck""" )
    if "layer1_rn" in name:
        name = name.replace("""layer1_rn""" , """convs.0""" )
    if "layer2_rn" in name:
        name = name.replace("""layer2_rn""" , """convs.1""" )
    if "layer3_rn" in name:
        name = name.replace("""layer3_rn""" , """convs.2""" )
    if "layer4_rn" in name:
        name = name.replace("""layer4_rn""" , """convs.3""" )
    if "refinenet" in name:
        layer_idx = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
    if "out_conv" in name:
        name = name.replace("""out_conv""" , """projection""" )
    if "resConfUnit1" in name:
        name = name.replace("""resConfUnit1""" , """residual_layer1""" )
    if "resConfUnit2" in name:
        name = name.replace("""resConfUnit2""" , """residual_layer2""" )
    if "conv1" in name:
        name = name.replace("""conv1""" , """convolution1""" )
    if "conv2" in name:
        name = name.replace("""conv2""" , """convolution2""" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
    if "pretrained" in name:
        name = name.replace("""pretrained""" , """dpt""" )
    if "bn" in name:
        name = name.replace("""bn""" , """batch_norm""" )
    if "head" in name:
        name = name.replace("""head""" , """head.head""" )
    if "encoder.norm" in name:
        name = name.replace("""encoder.norm""" , """layernorm""" )
    if "auxlayer" in name:
        name = name.replace("""auxlayer""" , """auxiliary_head.head""" )
    if "backbone" in name:
        name = name.replace("""backbone""" , """backbone.bit.encoder""" )
    if ".." in name:
        name = name.replace("""..""" , """.""" )
    if "stem.conv" in name:
        name = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """layers""" )
    if "convolution" in name and "backbone" in name:
        name = name.replace("""convolution""" , """conv""" )
    if "layer" in name and "backbone" in name:
        name = name.replace("""layer""" , """layers""" )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
    if "embedder.conv" in name:
        name = name.replace("""embedder.conv""" , """embedder.convolution""" )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
    return name
def read_in_q_k_v(state_dict , config ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ):
    """simple docstring"""
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location="""cpu""" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if """ade""" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if """ade""" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors="""pt""" )
    # forward pass
    outputs = model(**encoding ).logits if """ade""" in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
        image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
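    # Example invocation (illustrative; the converter file name is an assumption):
    #   python convert_dpt_to_pytorch.py --checkpoint_url <url-or-local-path> \
    #       --pytorch_dump_folder_path ./dpt --show_prediction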
"""simple docstring"""
def lowerCAmelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
"""simple docstring"""
__lowercase = [False] * len(UpperCamelCase__ )
__lowercase = []
queue.append(UpperCamelCase__ )
__lowercase = True
while queue:
__lowercase = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(UpperCamelCase__ )
__lowercase = True
__lowercase = u
return visited[t]
def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from `source` to `sink`; `graph` is a capacity matrix."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # update residual capacities of the edges (and reverse edges) along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
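# BFS-based augmenting paths make this the Edmonds-Karp variant, which runs in
# O(V * E^2); for the classic capacity matrix above the printed maximum flow is 23.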
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
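# Minimal usage sketch (illustrative; assumes the class names restored above):
# config = XLMRobertaXLConfig(num_hidden_layers=4)   # override any default
# onnx_config = XLMRobertaXLOnnxConfig(config)       # exposes the dynamic `inputs` axes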
"""Schur complement of a symmetric block matrix, with unit tests."""
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """
    Schur complement of the symmetric block matrix [[A, B], [B.T, C]].
    A precomputed (pseudo-)inverse of A may be passed via `pseudo_inv`.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
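# For the block matrix M = [[A, B], [B.T, C]], the Schur complement
# S = C - B.T @ A^{-1} @ B satisfies det(M) = det(A) * det(S); the first
# test below verifies exactly that identity on a small example.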
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        # passing B in place of A gives mismatched column counts, which must raise
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" GPT-NeoX-20B tokenizer (backed by HuggingFace's *tokenizers* library), based on byte-level
    Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Build conversation inputs by appending the EOS token after each turn."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
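# Usage sketch (illustrative; downloads the tokenizer files from the Hub):
# tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
# ids = tokenizer("Hello world").input_ids  # list of byte-level BPE token ids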
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """k: Harris free parameter, empirically 0.04 - 0.06; window_size: neighbourhood size."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)
                # Threshold can be tuned; 0.5 keeps only strong corner responses
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
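# Background note: R = det(M) - k * trace(M)^2 is the Harris response of the
# structure tensor M accumulated over each window; R is large and positive at
# corners, negative along edges, and near zero in flat regions.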
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
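# Note: tests decorated with @slow only run when the RUN_SLOW environment
# variable is set to a truthy value, e.g. `RUN_SLOW=1 python -m pytest ...`.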
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
lowerCAmelCase_ = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
lowerCAmelCase_ = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
lowerCAmelCase_ = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
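# For reference: corpus BLEU multiplies the geometric mean of the n-gram
# precisions p_1..p_N by a brevity penalty BP = min(1, exp(1 - ref_len / trans_len)),
# which penalizes translations shorter than their references.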
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
for year in (19_94, 20_00, 20_10, 20_21, 20_23):
__snake_case = """will be""" if year > datetime.now().year else """was"""
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
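# For example, rename_key("visual.transformer.resblocks.0.attn.out_proj.weight")
# should yield "vision_model.encoder.layers.0.self_attn.out_proj.weight".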
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[
                            :dim
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[
                            -dim:
                        ]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                        dim : dim * 2, :
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                        dim : dim * 2
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=True)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
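    # Example invocation (illustrative; the converter script name is an assumption):
    #   python convert_x_clip_original_pytorch_to_hf.py --model_name xclip-base-patch32 \
    #       --pytorch_dump_folder_path ./xclip-base-patch32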
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
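# Note on crop_pct (based on the PoolFormer image processor's behavior): the
# shortest edge is first resized to int(size / crop_pct) and the image is then
# center-cropped to crop_size, mirroring the timm-style evaluation preprocessing.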
"""Tests for the activation functions in transformers.activations."""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
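# For reference: the exact GELU is x * 0.5 * (1 + erf(x / sqrt(2))), while
# gelu_new is the tanh approximation used by the original GPT-2 codebase,
# which is why gelu_python and gelu_new are expected to differ slightly above.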
"""Fast tokenization classes for RetriBERT."""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's *tokenizers* library), identical to
    BertTokenizerFast: punctuation splitting followed by wordpiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
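# For a sentence pair (A, B), build_inputs_with_special_tokens produces
# [CLS] A [SEP] B [SEP], and create_token_type_ids_from_sequences marks the
# first segment (including [CLS] and its [SEP]) with 0s and the second with 1s.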
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/batch-norm/ReLU layers.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)

        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule( nn.Module ):
    '''simple docstring'''
    def __init__( self , pool_scales: Tuple[int, ...] , in_channels: int , channels: int , align_corners: bool ):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales ):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale , in_channels=in_channels , channels=channels )
            self.blocks.append(block )
            self.add_module(str(i ) , block )
    def forward( self , x: torch.Tensor ):
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x )
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners )
            ppm_outs.append(upsampled_ppm_out )
        return ppm_outs
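# Shape sketch for the pyramid pooling above (sizes are illustrative
# assumptions, not values from this file): every pooled branch is upsampled
# back to the input's spatial size, which is what makes the later channel-wise
# concatenation in the head legal.
# feature = torch.randn(1, 8, 32, 32)
# pooled = nn.AdaptiveAvgPool2d(2)(feature)  # (1, 8, 2, 2)
# restored = nn.functional.interpolate(pooled, size=(32, 32), mode='bilinear', align_corners=False)
# assert restored.shape == feature.shape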
class UperNetHead( nn.Module ):
    '''simple docstring'''
    def __init__( self , config , in_channels ):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels , self.channels , kernel_size=1 )
            fpn_conv = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(lateral_conv )
            self.fpn_convs.append(fpn_conv )
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
    def init_weights( self ):
        self.apply(self._init_weights )
    def _init_weights( self , module ):
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def psp_forward( self , inputs ):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x ) )
        psp_outs = torch.cat(psp_outs , dim=1 )
        output = self.bottleneck(psp_outs )
        return output
    def forward( self , encoder_hidden_states: torch.Tensor ):
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(encoder_hidden_states ) )
        # build top-down path
        used_backbone_levels = len(laterals )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=prev_shape , mode='bilinear' , align_corners=self.align_corners )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners )
        fpn_outs = torch.cat(fpn_outs , dim=1 )
        output = self.fpn_bottleneck(fpn_outs )
        output = self.classifier(output )
        return output
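# FPN top-down sketch (channel and level counts are illustrative assumptions):
# each coarser level is upsampled to the next finer level's spatial size and
# added in, so semantics flow from the strong top of the pyramid downward,
# exactly as in the "build top-down path" loop above.
# levels = [torch.randn(1, 4, s, s) for s in (32, 16, 8)]
# for i in range(len(levels) - 1, 0, -1):
#     levels[i - 1] = levels[i - 1] + nn.functional.interpolate(
#         levels[i], size=levels[i - 1].shape[2:], mode='bilinear', align_corners=False)
# assert [l.shape[-1] for l in levels] == [32, 16, 8]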
class UperNetFCNHead( nn.Module ):
    '''simple docstring'''
    def __init__( self , config , in_index: int = 2 , kernel_size: int = 3 , dilation: Union[int, Tuple[int, int]] = 1 ):
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs )
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=kernel_size , padding=kernel_size // 2 )
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
    def init_weights( self ):
        self.apply(self._init_weights )
    def _init_weights( self , module ):
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def forward( self , encoder_hidden_states: torch.Tensor ):
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states )
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        output = self.classifier(output )
        return output
class UperNetPreTrainedModel( PreTrainedModel ):
    '''simple docstring'''
    config_class = UperNetConfig
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = True
    def _init_weights( self , module ):
        if isinstance(module , UperNetPreTrainedModel ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()
    def init_weights( self ):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()
    def _set_gradient_checkpointing( self , module , value=False ):
        if isinstance(module , BackboneMixin ):
            module.gradient_checkpointing = value
_UpperCAmelCase = R"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_UpperCAmelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    '''UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.''' , UPERNET_START_DOCSTRING , )
class UperNetForSemanticSegmentation( UperNetPreTrainedModel ):
    '''simple docstring'''
    def __init__( self , config ):
        super().__init__(config )
        self.backbone = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config , in_channels=self.backbone.channels )
        self.auxiliary_head = UperNetFCNHead(config ) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
    def forward( self , pixel_values: Optional[torch.Tensor] = None , output_attentions: Optional[bool] = None , output_hidden_states: Optional[bool] = None , labels: Optional[torch.Tensor] = None , return_dict: Optional[bool] = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values , output_hidden_states=output_hidden_states , output_attentions=output_attentions )
        features = outputs.feature_maps
        logits = self.decode_head(features )
        logits = nn.functional.interpolate(logits , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=False )
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features )
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=False )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('The number of labels should be greater than one' )
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                main_loss = loss_fct(logits , labels )
                auxiliary_loss = loss_fct(auxiliary_logits , labels )
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
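# Minimal usage sketch (shapes are illustrative; the checkpoint name comes
# from the archive list at the top of this file). Logits come back at the
# input resolution because forward() bilinearly upsamples them:
# model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
# outputs = model(pixel_values=torch.randn(1, 3, 512, 512))
# outputs.logits.shape  # -> (1, config.num_labels, 512, 512)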
| 699 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
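# Sanity sketch for the bit expansion above (uses only the two constants just
# defined): bin() drops the "0b" prefix, so the bit list has exactly
# WATERMARK_MESSAGE.bit_length() entries.
# assert len(WATERMARK_BITS) == WATERMARK_MESSAGE.bit_length()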
class StableDiffusionXLWatermarker:
    '''simple docstring'''
    def __init__( self ):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('bits' , self.watermark )
    def apply_watermark( self , images: torch.FloatTensor ):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , 'dwtDct' ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
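# Round-trip sketch for the value ranges used above: [-1, 1] pixels are mapped
# to [0, 255] for the encoder and back, which is the identity up to rounding.
# x = torch.linspace(-1.0, 1.0, steps=5)
# pixels = 255 * (x / 2 + 0.5)
# back = torch.clamp(2 * (pixels / 255 - 0.5), min=-1.0, max=1.0)
# assert torch.allclose(x, back)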
| 699 | 1 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , use_stable_embedding=True , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = OpenLlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )["hidden_states"][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
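    # In words, what the test above checks: a single full-sequence forward pass
    # and an incremental pass that reuses past_key_values must agree (to the
    # 1e-3 tolerance used here) on the hidden states of the appended tokens.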
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self ):
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=3_7 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_open_llama_sequence_classification_model( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_single_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_multi_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
def UpperCamelCase_ ( self ) -> Optional[Any]:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Dict = ids_tensor([1, 1_0] , config.vocab_size )
_SCREAMING_SNAKE_CASE : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
_SCREAMING_SNAKE_CASE : Tuple = OpenLlamaModel(__lowerCamelCase )
original_model.to(__lowerCamelCase )
original_model.eval()
_SCREAMING_SNAKE_CASE : List[str] = original_model(__lowerCamelCase ).last_hidden_state
_SCREAMING_SNAKE_CASE : int = original_model(__lowerCamelCase ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
_SCREAMING_SNAKE_CASE : Optional[int] = {"type": scaling_type, "factor": 10.0}
_SCREAMING_SNAKE_CASE : Optional[Any] = OpenLlamaModel(__lowerCamelCase )
scaled_model.to(__lowerCamelCase )
scaled_model.eval()
_SCREAMING_SNAKE_CASE : int = scaled_model(__lowerCamelCase ).last_hidden_state
_SCREAMING_SNAKE_CASE : str = scaled_model(__lowerCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-5 ) )
| 381 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('~'), '.cache')
CACHE_DIR = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _get_ckpt_path(model_type, use_small=False ):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"] )
def _download(from_hf_path, file_name ):
    os.makedirs(CACHE_DIR, exist_ok=True )
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR )
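# Illustrative check of the key scheme above: with use_small=True the "text"
# model resolves to the "text_small" entry, i.e. <CACHE_DIR>/text.pt.
# assert REMOTE_MODEL_PATHS["text_small"]["file_name"] == "text.pt"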
def _load_model(ckpt_path, device, use_small=False, model_type="text" ):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"""{model_type}_small""" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path ):
        logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
        _download(model_info["repo_id"], model_info["file_name"] )
    checkpoint = torch.load(ckpt_path, map_location=device )
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head" )
    model_args["hidden_size"] = model_args.pop("n_embd" )
    model_args["num_layers"] = model_args.pop("n_layer" )
    model_config = ConfigClass(**checkpoint["model_args"] )
    model = ModelClass(config=model_config )
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items() ):
        if k.startswith(unwanted_prefix ):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix ) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name] )
            state_dict[new_k] = state_dict.pop(k )
    extra_keys = set(state_dict.keys() ) - set(model.state_dict().keys() )
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias" )}
    missing_keys = set(model.state_dict().keys() ) - set(state_dict.keys() )
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias" )}
    if len(extra_keys ) != 0:
        raise ValueError(f"""extra keys found: {extra_keys}""" )
    if len(missing_keys ) != 0:
        raise ValueError(f"""missing keys: {missing_keys}""" )
    model.load_state_dict(state_dict, strict=False )
    n_params = model.num_parameters(exclude_embeddings=True )
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"""model loaded: {round(n_params/1e6, 1 )}M params, {round(val_loss, 3 )} loss""" )
    model.eval()
    model.to(device )
    del checkpoint, state_dict
    return model
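# Example of the key remapping above (the key is illustrative): a checkpoint
# key "_orig_mod.transformer.h.0.attn.c_attn.weight" loses the "_orig_mod."
# prefix and, after the new_layer_name_dict substitutions, becomes
# "layers.0.attn.att_proj.weight".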
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=False, __lowerCamelCase="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
_SCREAMING_SNAKE_CASE : Union[str, Any] = "cpu" # do conversion on cpu
_SCREAMING_SNAKE_CASE : Union[str, Any] = _get_ckpt_path(__lowerCamelCase, use_small=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = _load_model(__lowerCamelCase, __lowerCamelCase, model_type=__lowerCamelCase, use_small=__lowerCamelCase )
# load bark initial model
_SCREAMING_SNAKE_CASE : Union[str, Any] = _bark_load_model(__lowerCamelCase, "cpu", model_type=__lowerCamelCase, use_small=__lowerCamelCase )
if model_type == "text":
_SCREAMING_SNAKE_CASE : str = bark_model["model"]
if model.num_parameters(exclude_embeddings=__lowerCamelCase ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
_SCREAMING_SNAKE_CASE : Optional[Any] = 5
_SCREAMING_SNAKE_CASE : Optional[int] = 10
if model_type in ["text", "coarse"]:
_SCREAMING_SNAKE_CASE : Any = torch.randint(256, (batch_size, sequence_length), dtype=torch.int )
_SCREAMING_SNAKE_CASE : Optional[int] = bark_model(__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
# take last logits
_SCREAMING_SNAKE_CASE : List[str] = output_new_model_total.logits[:, [-1], :]
else:
_SCREAMING_SNAKE_CASE : Tuple = 3
_SCREAMING_SNAKE_CASE : Union[str, Any] = 8
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int )
_SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase, __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = bark_model(__lowerCamelCase, __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("initial and new outputs are not equal" )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path, ):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text )
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json" ) )
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json" ) )
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json" ) )
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
    semantic = BarkSemanticModel.from_pretrained(semantic_path )
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path )
    fineAcoustic = BarkFineModel.from_pretrained(fine_path )
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz" )
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config )
    bark = BarkModel(bark_config )
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 381 | 1 |
"""simple docstring"""
def text_justification(word , max_width ):
    """simple docstring"""
    words = word.split()
    def justify(line , width , max_width ) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " " )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )
    answer = []
    line = []
    width = 0
    for inner_word in words:
        if width + len(inner_word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word )
            width += len(inner_word )
        else:
            # justify the line and add it to result
            answer.append(justify(line , width , max_width ) )
            # reset new line and new width
            line , width = [inner_word], len(inner_word )
    remaining_spaces = max_width - width - len(line )
    answer.append(" ".join(line ) + (remaining_spaces + 1) * " " )
    return answer
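# Worked example (input is illustrative): with max_width=16,
# text_justification("The quick brown fox", 16) returns
# ['The  quick brown', 'fox' + 13 * ' '] - the full line's 3 extra spaces are
# distributed round-robin (2, then 1) between the gaps, while the last line is
# left-aligned and padded out to the full width.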
if __name__ == "__main__":
from doctest import testmod
testmod()
| 341 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 534 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/swinv2-tiny-patch4-window8-256""": (
        """https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
    ),
}
class Swinv2Config( PretrainedConfig ):
    model_type = '''swinv2'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
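# Worked example of the derived hidden size above: with the default
# embed_dim=96 and depths=[2, 2, 6, 2] (four stages),
# hidden_size = 96 * 2 ** (4 - 1) = 768.
# assert int(96 * 2 ** (4 - 1)) == 768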
| 714 |
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def get_dummy_components( self ):
        """simple docstring"""
        return self._get_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_save_load_optional_components( self ):
        """simple docstring"""
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_float16( self ):
        """simple docstring"""
        super().test_save_load_float16(expected_max_diff=1E-1 )
    def test_attention_slicing_forward_pass( self ):
        """simple docstring"""
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local( self ):
        """simple docstring"""
        self._test_save_load_local()
    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class IFPipelineSlowTests( unittest.TestCase ):
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all( self ):
        """simple docstring"""
        pipe_1 = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.float16 )
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.float16 , text_encoder=None , tokenizer=None )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to('cuda' )
        prompt_embeds , negative_prompt_embeds = pipe_1.encode_prompt('anime turtle' , device='cuda' )
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components )
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components )
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_img2img(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components )
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components )
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_inpainting(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
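    # Design note on the reuse above: building the img2img and inpainting
    # pipelines from the existing .components dicts avoids reloading the large
    # UNet weights between the three stages of this test.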
    def _test_if( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
        """simple docstring"""
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , num_inference_steps=2 , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , generator=generator , num_inference_steps=2 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
        assert_mean_pixel_difference(image , expected_image )
    def _test_if_img2img( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
        """simple docstring"""
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , num_inference_steps=2 , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        original_image = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(torch_device )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , original_image=original_image , generator=generator , num_inference_steps=2 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
        assert_mean_pixel_difference(image , expected_image )
    def _test_if_inpainting( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
        """simple docstring"""
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        mask_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(torch_device )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , mask_image=mask_image , num_inference_steps=2 , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        original_image = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(torch_device )
        mask_image = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , mask_image=mask_image , original_image=original_image , generator=generator , num_inference_steps=2 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
        assert_mean_pixel_difference(image , expected_image )
def _start_torch_memory_measurement():
    '''simple docstring'''
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 559 | 0 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a ):
    min_val = min(a )  # min() finds the minimum value
    max_val = max(a )  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x , int ), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size ):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
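# Illustrative trace of the counting step above (hypothetical input, not part
# of the original file): for [3, 1, 3], min_val is 1 and size is 3, so the
# pigeonhole counts are [1, 0, 2] before being unpacked back in order.
def _holes_for(a):
    # Helper sketch that exposes just the counting phase of pigeonhole_sort.
    min_val, max_val = min(a), max(a)
    holes = [0] * (max_val - min_val + 1)
    for x in a:
        holes[x - min_val] += 1
    return holes
assert _holes_for([3, 1, 3]) == [1, 0, 2]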
def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a )
    print('Sorted order is:' , ' '.join([str(n ) for n in a] ) )
main()
| 31 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict( TypedDict ):
    '''simple docstring'''
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str ) -> list[str]:
    if not isinstance(s , str ):
        raise TypeError('The parameter s type must be str.' )
    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform(s: str ) -> BWTTransformDict:
    if not isinstance(s , str ):
        raise TypeError('The parameter s type must be str.' )
    if not s:
        raise ValueError('The parameter s must not be empty.' )
    rotations = all_rotations(s )
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt(bwt_string: str , idx_original_string: int ) -> str:
    if not isinstance(bwt_string , str ):
        raise TypeError('The parameter bwt_string type must be str.' )
    if not bwt_string:
        raise ValueError('The parameter bwt_string must not be empty.' )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            'The parameter idx_original_string type must be int or passive'
            ' of cast to int.' )
    if idx_original_string < 0:
        raise ValueError('The parameter idx_original_string must not be lower than 0.' )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            'The parameter idx_original_string must be lower than' ' len(bwt_string).' )
    ordered_rotations = [''] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
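# Worked example of the round trip (the classic "banana" case): the six
# rotations of "banana" sort to [abanan, anaban, ananab, banana, nabana,
# nanaba]; the last column reads "nnbaaa" and the original sits at index 3.
assert bwt_transform("banana") == {"bwt_string": "nnbaaa", "idx_original_string": 3}
assert reverse_bwt("nnbaaa", 3) == "banana"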
if __name__ == "__main__":
    entry_msg = 'Provide a string that I will generate its BWT transform: '
    s = input(entry_msg).strip()
    result = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result['bwt_string']}\''''
)
    original_string = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
f'''we get original string \'{original_string}\''''
)
| 31 | 1 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    '''simple docstring'''
    def __init__( self , initial_learning_rate , decay_schedule_fn , warmup_steps , power = 1.0 , name = None , ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__( self , step ):
        with tf.name_scope(self.name or """WarmUp""" ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )
    def get_config( self ):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a learning rate schedule using a warmup phase followed by a linear decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
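# A minimal usage sketch (the step counts and the `model`/`loss_fn` names are
# hypothetical, not from this file):
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5,
#         num_train_steps=10_000,
#         num_warmup_steps=500,
#         weight_decay_rate=0.01,
#     )
#     model.compile(optimizer=optimizer, loss=loss_fn)
#
# `lr_schedule` is returned separately so the learning-rate curve can be
# logged or plotted without reaching into the optimizer's internals.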
class AdamWeightDecay(Adam):
    """Adam with optional decoupled weight decay applied before each update."""

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Distribution-aware gradient accumulation utility."""

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
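# A minimal usage sketch (assuming `model`, `optimizer`, `loss_fn` and a
# tf.data `batch` exist; the accumulation factor of 4 is made up):
#
#     accumulator = GradientAccumulator()
#     with tf.GradientTape() as tape:
#         loss = loss_fn(model(batch["x"]), batch["y"])
#     accumulator(tape.gradient(loss, model.trainable_variables))
#     if accumulator.step % 4 == 0:  # apply once every 4 micro-batches
#         optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#         accumulator.reset()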
| 717 |
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """
    Apply Coulomb's law F = k * |q1 * q2| / d^2 to solve for whichever of the
    four quantities was passed in as 0 (exactly one must be 0).
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 | 0 |
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    """
    Returns True if the tree rooted at `root` is a binary search tree, raising
    ValueError if any node is not a TreeNode or its data is not castable to float.
    """

    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
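# A minimal sketch of both outcomes:
#
#     is_binary_search_tree(TreeNode(2, TreeNode(1), TreeNode(3)))   # True
#     is_binary_search_tree(TreeNode(2, TreeNode(3), TreeNode(1)))   # False
#
# The bound-passing check is O(n): each node is visited once with the open
# interval its data must fall into.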
if __name__ == "__main__":
import doctest
doctest.testmod()
| 220 |
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompresses the given bit string using the LZW algorithm and returns the result."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # code width grows by one bit: re-prefix every existing key with '0'
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
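# Illustrative behaviour of the lexicon rebuild above: whenever `index`
# reaches a power of two the code width grows by one bit. Starting from
# {'0': '0', '1': '1'} (index = 2), the first emitted symbol triggers a
# rebuild to 2-bit keys ('00', '01') before the new entry for code '10'
# is added.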
def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given bit string (only '0's and '1's) as bytes in the file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Removes the size prefix that a compressed file carries and returns the rest."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def decompress(source_path: str, destination_path: str) -> None:
    """Reads the source file, decompresses it and writes the result to the destination file."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    decompress(sys.argv[1], sys.argv[2])
| 239 | 0 |
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowerCamelCase__ ( UpperCAmelCase_ ):
def __init__( self : List[str] , _lowercase : Callable , _lowercase : Optional[Features] = None , _lowercase : str = None , _lowercase : bool = False , _lowercase : bool = False , _lowercase : Optional[dict] = None , _lowercase : Optional[int] = None , **_lowercase : List[Any] , ):
super().__init__(
features=_lowercase , cache_dir=_lowercase , keep_in_memory=_lowercase , streaming=_lowercase , num_proc=_lowercase , **_lowercase , )
A = Generator(
cache_dir=_lowercase , features=_lowercase , generator=_lowercase , gen_kwargs=_lowercase , **_lowercase , )
def __a ( self : int ):
# Build iterable dataset
if self.streaming:
A = self.builder.as_streaming_dataset(split='train' )
# Build regular (map-style) dataset
else:
A = None
A = None
A = None
A = None
self.builder.download_and_prepare(
download_config=_lowercase , download_mode=_lowercase , verification_mode=_lowercase , base_path=_lowercase , num_proc=self.num_proc , )
A = self.builder.as_dataset(
split='train' , verification_mode=_lowercase , in_memory=self.keep_in_memory )
return dataset
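# A minimal usage sketch (the generator function is hypothetical):
#
#     def squares():
#         for i in range(10):
#             yield {"n": i, "square": i * i}
#
#     ds = GeneratorDatasetInputStream(generator=squares).read()
#     ds[0]  # {'n': 0, 'square': 0}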
| 720 |
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
UpperCamelCase : Optional[Any] = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, wrapped to `max_width` chars for nice replaying."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "

        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Flip `if 0` to `if 1` to debug the report generation without running the
    # actual (slow) benchmark: it returns random metrics instead.
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 91 | 0 |
from __future__ import annotations


class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Returns the sum of array[start..end] (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Returns True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
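# Illustrative queries on [1, 2, 3] (prefix sums are [1, 3, 6]):
#
#     ps = PrefixSum([1, 2, 3])
#     ps.get_sum(0, 2)      # 6
#     ps.get_sum(1, 2)      # 5
#     ps.contains_sum(5)    # True (2 + 3)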
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolates the decimal part of a number, rounded to digit_amount digits
    when digit_amount > 0.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 11 | 1 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
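# A minimal usage sketch:
#
#     dep_version_check("tokenizers")  # raises if the installed version
#                                      # violates the pin in the deps table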
| 201 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 201 | 1 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 441 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """
    Factory function used to instantiate the training command from provided command line arguments.
    """
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
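# A minimal CLI usage sketch (paths are hypothetical):
#
#     transformers-cli train --train_data ./train.tsv --task text_classification \
#         --model bert-base-uncased --output ./trained_model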
| 441 | 1 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}


class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        # merge the three sub-vocabularies into one mapping
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        """Converts the artist, genre and lyrics tokens to their indices using the vocabulary."""
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))

        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        """Converts a string into a sequence of tokens (one character per token)."""
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        """Converts three strings into three sequences of tokens."""
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(
        self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False
    ) -> Tuple[str, str, str, Dict[str, Any]]:
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        """Normalizes the input text (used for the genres and the artist names)."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str:
        return " ".join(lyrics)
    def convert_to_tensors(
        self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
    ):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]

            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )

        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        """Converts the raw strings to a list of token id sequences, one per prior."""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Saves the tokenizer's three vocabulary files to the given directory."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
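# A minimal usage sketch (checkpoint name taken from the vocab map above;
# the artist/genre/lyrics strings are made up):
#
#     tokenizer = JukeboxTokenizer.from_pretrained("ArthurZ/jukebox")
#     encoding = tokenizer("Alan Jackson", "Country Rock", "old town road")
#     encoding["input_ids"]  # one tensor per prior, as listed in `self.version`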
| 706 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: List[Any] ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_lowerCAmelCase )
UpperCAmelCase_ =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =(
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
UpperCAmelCase_ =40_0366_0346
UpperCAmelCase_ =7
# without safety guidance (sld_guidance_scale = 0)
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=_lowerCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCAmelCase_ =output.images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
UpperCAmelCase_ =[0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=_lowerCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCAmelCase_ =output.images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
UpperCAmelCase_ =[0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self: Any ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_lowerCAmelCase )
UpperCAmelCase_ =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ ="padme amidala taking a bath artwork, safe for work, no nudity"
UpperCAmelCase_ =27_3497_1755
UpperCAmelCase_ =7
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=_lowerCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCAmelCase_ =output.images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
UpperCAmelCase_ =[0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=_lowerCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCAmelCase_ =output.images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
UpperCAmelCase_ =[0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self: str ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =(
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
UpperCAmelCase_ =10_4435_5234
UpperCAmelCase_ =12
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=_lowerCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCAmelCase_ =output.images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
UpperCAmelCase_ =np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=_lowerCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCAmelCase_ =output.images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
UpperCAmelCase_ =np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
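# A minimal sketch (not part of the test suite) of how the SLD knobs exercised
# above are meant to be used at inference time. Values are illustrative, not
# tuned, and `StableDiffusionPipelineSafe` is assumed to be the pipeline under
# test here (the test file aliases it as `StableDiffusionPipeline`):
#
#   from diffusers import StableDiffusionPipelineSafe
#
#   pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
#   image = pipe(
#       "portrait photo of a person",
#       sld_guidance_scale=2000,   # 0 disables safety guidance entirely
#       sld_warmup_steps=7,        # diffusion steps before safety guidance kicks in
#       sld_threshold=0.025,
#       sld_momentum_scale=0.5,
#       sld_mom_beta=0.7,
#   ).images[0]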
| 550 | 0 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
a :Tuple = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
a :Tuple = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
a :Optional[Any] = dict(zip(vocab, range(len(vocab))))
a :str = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
a :Union[str, Any] = Path(tmpdirname)
a :Tuple = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
a :List[str] = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
a :Dict = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
a :Dict = FSMTTokenizer(
langs=["en", "ru"],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
a :List[Any] = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
a :List[Any] = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
a :Tuple = tokenizer(["Making tiny model"], return_tensors="pt")
a :Any = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
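# Once uploaded, the tiny checkpoint is consumed like any other hub model.
# A minimal sketch (assuming the upload above succeeded):
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#
#   tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
#   batch = tokenizer(["Hello"], return_tensors="pt")
#   outputs = model(**batch)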
| 680 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if it is shorter than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is exactly the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is longer than the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story with no highlights yields an empty summary."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story yields empty story and summary lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
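# Quick reference for the helpers exercised above (a sketch, not a test;
# argument names are illustrative, the real helpers take them positionally):
#
#   truncate_or_pad([1, 2], block_size=4, pad_token_id=0)        # -> [1, 2, 0, 0]
#   build_mask(torch.tensor([5, 6, 0, 0]), pad_token_id=0)       # -> tensor([1, 1, 0, 0])
#   compute_token_type_ids(batch, separator)  # flips the segment id each time
#                                             # the separator token appears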
| 491 | 0 |
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : List[Any] ) -> set:
_snake_case = set()
# edges = list of graph's edges
_snake_case = get_edges(__lowerCAmelCase )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
_snake_case = edges.pop()
chosen_vertices.add(__lowerCAmelCase )
chosen_vertices.add(__lowerCAmelCase )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(__lowerCAmelCase )
return chosen_vertices
def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] ) -> set:
_snake_case = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 712 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def _UpperCAmelCase ( __lowerCamelCase : Optional[int] ) -> Dict:
_snake_case = [
'''decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__lowerCamelCase , __lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase : List[Any] ) -> List[str]:
_snake_case , _snake_case = emb.weight.shape
_snake_case = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_snake_case = emb.weight.data
return lin_layer
def _UpperCAmelCase ( __lowerCamelCase : str ) -> Any:
_snake_case = torch.load(__lowerCamelCase , map_location='''cpu''' )
_snake_case = Namespace(**checkpoint['''cfg''']['''model'''] )
_snake_case = checkpoint['''model''']
remove_ignore_keys_(__lowerCamelCase )
_snake_case = state_dict['''decoder.embed_tokens.weight'''].shape[0]
_snake_case = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()}
_snake_case = XGLMConfig(
vocab_size=__lowerCamelCase , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
_snake_case = XGLMForCausalLM(__lowerCamelCase )
_snake_case = model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
print(__lowerCamelCase )
_snake_case = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
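# Example invocation (the script name and paths are hypothetical):
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/model.pt ./xglm-converted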
| 430 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor with the ConvNeXT-style resizing scheme: below 384 pixels the shortest
    edge is resized with a crop percentage and center-cropped; at 384 or above the image is warped.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
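# A minimal usage sketch (the class name is assumed from the 384/crop_pct
# resizing logic, which matches the ConvNeXT image processor in transformers):
#
#   import numpy as np
#
#   processor = ConvNextImageProcessor(size={"shortest_edge": 224}, crop_pct=224 / 256)
#   batch = processor(images=np.zeros((512, 768, 3), dtype=np.uint8), return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224)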
| 330 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)

    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")


@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc


class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)


@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data


@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
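# map_nested in one line (a sketch): it applies a function to every leaf of an
# arbitrarily nested dict/list structure, optionally fanning out over processes:
#
#   map_nested(lambda x: x * 2, {"a": [1, 2], "b": {"c": 3}})
#   # -> {"a": [2, 4], "b": {"c": 6}}
#
# Note that a lambda only works without num_proc; multiprocessing requires a
# picklable (module-level) function, which is what the tests above exercise.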
| 330 | 1 |
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 80 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """
    Return the median of the combined, sorted contents of both arrays.

    >>> median_of_two_arrays([1, 2], [3])
    2
    >>> median_of_two_arrays([0, -1.1], [2.5, 1])
    0.5
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 80 | 1 |
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """
    Build and measure a GHZ-style entangled state on `qubits` qubits,
    returning the measurement counts from the Aer simulator.
    """
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their superposition and have the same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 263 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample."""

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states):
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self):
        return "p={}".format(self.drop_prob)


class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings from pixel values or intermediate feature maps."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings


class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group; input shape is [batch, channels, *]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # token mixing is average pooling minus the identity
        return self.pool(hidden_states) - hidden_states


class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states


class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs


class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value


POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""


@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )


class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output


@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # global average pool over the spatial dimensions, then classify
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
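# Minimal usage sketch (the checkpoint name comes from the docstring constants
# above; the processor pairing is the standard transformers one, assumed here):
#
#   from PIL import Image
#   from transformers import AutoImageProcessor
#
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   inputs = processor(images=Image.new("RGB", (224, 224)), return_tensors="pt")
#   logits = model(**inputs).logits  # shape (1, num_labels)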
| 263 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : Any = """MCTCTFeatureExtractor"""
UpperCAmelCase : Tuple = """AutoTokenizer"""
def __init__( self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any]):
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
a : Dict = self.feature_extractor
a : Tuple = False
def __call__( self : int , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : List[str]):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase)
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
a : List[str] = kwargs.pop("raw_speech")
else:
a : Union[str, Any] = kwargs.pop("audio" , __UpperCAmelCase)
a : Optional[Any] = kwargs.pop("sampling_rate" , __UpperCAmelCase)
a : List[Any] = kwargs.pop("text" , __UpperCAmelCase)
if len(__UpperCAmelCase) > 0:
a : int = args[0]
a : Tuple = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
a : Union[str, Any] = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase)
if text is not None:
a : int = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
a : Dict = encodings["input_ids"]
return inputs
def __snake_case ( self : Optional[int] , *__UpperCAmelCase : Any , **__UpperCAmelCase : str):
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase)
def __snake_case ( self : List[str] , *__UpperCAmelCase : str , **__UpperCAmelCase : Union[str, Any]):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__UpperCAmelCase , **__UpperCAmelCase)
a : Optional[int] = kwargs.pop("input_features" , __UpperCAmelCase)
a : Optional[int] = kwargs.pop("labels" , __UpperCAmelCase)
if len(__UpperCAmelCase) > 0:
a : int = args[0]
a : Dict = args[1:]
if input_features is not None:
a : Union[str, Any] = self.feature_extractor.pad(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase)
if labels is not None:
a : Any = self.tokenizer.pad(__UpperCAmelCase , **__UpperCAmelCase)
if labels is None:
return input_features
elif input_features is None:
return labels
else:
a : Dict = labels["input_ids"]
return input_features
def __snake_case ( self : Any , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : int):
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase)
@contextmanager
def __snake_case ( self : Optional[Any]):
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call.")
a : str = True
a : str = self.tokenizer
yield
a : Union[str, Any] = self.feature_extractor
a : int = False
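# Typical round trip (a sketch; `waveform` is a 1-D float array and the
# sampling rate must match what the feature extractor was trained with):
#
#   inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
#   labels = processor(text="a transcript")["input_ids"]         # tokenizer path
#   batch = processor.pad(input_features=[...], labels=[...])    # collation path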
| 135 |
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
__lowercase = float("""nan""")
class _A :
"""simple docstring"""
def __init__( self : List[str] , __UpperCAmelCase : Optional[int]):
a : Any = sys.stdout
a : Any = open(__UpperCAmelCase , "a")
def __getattr__( self : Dict , __UpperCAmelCase : List[Any]):
return getattr(self.stdout , __UpperCAmelCase)
def __snake_case ( self : Any , __UpperCAmelCase : Any):
self.stdout.write(__UpperCAmelCase)
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , __UpperCAmelCase , 0 , re.M))
def lowercase ( A_=80 , A_=False )-> List[str]:
'''simple docstring'''
a : List[Any] = []
# deal with critical env vars
a : List[Any] = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
a : Any = os.environ.get(A_ , A_ )
if val is not None:
cmd.append(F'''{key}={val}''' )
# python executable (not always needed if the script is executable)
a : List[Any] = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(A_ )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
a : Any = []
a : Any = ""
while len(A_ ) > 0:
current_line += F'''{cmd.pop(0 )} '''
if len(A_ ) == 0 or len(A_ ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(A_ )
a : List[Any] = ""
return "\\\n".join(A_ )
def lowercase ( A_ , A_ )-> Tuple:
'''simple docstring'''
a : List[str] = re.sub(R"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
a : Optional[int] = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += F''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
a : Dict = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def lowercase ( A_ , A_ , A_ , A_ , A_ , A_ , A_ )-> int:
'''simple docstring'''
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 1_0.3_1, 1_0_0.2, 5_5.6_6_6_6, 2_2_2.2_2_2_2_2_2_2_2] )} , )
a : Optional[Any] = subprocess.run(A_ , capture_output=A_ , text=A_ )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
a : List[str] = variation.replace(" " , "-" )
with open(Path(A_ ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f:
f.write(result.stdout )
with open(Path(A_ ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f:
a : Dict = json.load(A_ )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def lowercase ( A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , )-> Tuple:
'''simple docstring'''
a : List[Any] = []
a : List[str] = []
a : Union[str, Any] = F'''{id}: {variation:<{longest_variation_len}}'''
a : Any = F'''{preamble}: '''
a : Optional[Any] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(A_ ) , desc=A_ , leave=A_ ):
a : Dict = process_run_single(
A_ , A_ , A_ , A_ , A_ , A_ , A_ )
a : Tuple = single_run_metrics[target_metric_key]
if not math.isnan(A_ ):
metrics.append(A_ )
results.append(A_ )
outcome += "✓"
else:
outcome += "✘"
a : List[str] = F'''\33[2K\r{outcome}'''
if len(A_ ) > 0:
a : List[Any] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
a : Tuple = round(mean_metrics[target_metric_key] , 2 )
a : Optional[int] = F'''{outcome} {mean_target}'''
if len(A_ ) > 1:
results_str += F''' {tuple(round(A_ , 2 ) for x in results )}'''
print(A_ )
a : Optional[int] = variation
return mean_metrics
else:
print(A_ )
return {variation_key: variation, target_metric_key: nan}
def lowercase ( )-> Any:
'''simple docstring'''
a : int = torch.cuda.get_device_properties(torch.device("cuda" ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def lowercase ( A_ , A_ , A_ , A_ , A_ )-> List[str]:
'''simple docstring'''
a : Optional[Any] = pd.DataFrame(A_ )
a : Tuple = "variation"
a : Union[str, Any] = "diff_%"
a : Optional[Any] = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
a : List[str] = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(A_ ):
# as a fallback, use the minimal value as the sentinel
a : Optional[int] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(A_ ):
a : Tuple = df.apply(
lambda A_ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
a : str = [variation_key, target_metric_key, diff_key, *report_metric_keys]
a : Tuple = df.reindex(A_ , axis="columns" ) # reorder cols
# capitalize
a : Tuple = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
a : Dict = df.rename(lambda A_ : c.replace("_" , "<br>" ) , axis="columns" )
a : Tuple = df.rename(lambda A_ : c.replace("_" , "\n" ) , axis="columns" )
a : Dict = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=A_ , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=A_ , floatfmt=".2f" )]
print("\n\n".join(A_ ) )
def main( )-> List[str]:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd" , default=None , type=str , required=True , help="Base cmd" , )
    parser.add_argument(
        "--variations" , default=None , type=str , nargs="+" , required=True , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
    parser.add_argument(
        "--base-variation" , default=None , type=str , help="Baseline variation to compare to. If None, the minimal target value will be used to compare against" , )
    parser.add_argument(
        "--target-metric-key" , default=None , type=str , required=True , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
    parser.add_argument(
        "--report-metric-keys" , default="" , type=str , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'" , )
    parser.add_argument(
        "--repeat-times" , default=1 , type=int , help="How many times to re-run each variation - an average will be reported" , )
    parser.add_argument(
        "--output_dir" , default="output_benchmark" , type=str , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
    parser.add_argument(
        "--verbose" , default=False , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )
    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(R"\|" , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(" ".join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
    print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
    print(F'''and this script\'s output is also piped into {report_fn}''' )
    sys.stdout = Tee(report_fn )
    print(F'''\n*** Running {len(variations )} benchmarks:''' )
    print(F'''Base command: {" ".join(base_cmd )}''' )
    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations , desc="Total completion: " , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )
    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
main()
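# A hypothetical invocation of this benchmark harness (the script name and flag values are
# illustrative, not taken from the original source):
#
#   python trainer-benchmark.py \
#       --base-cmd "python examples/pytorch/translation/run_translation.py --output_dir output" \
#       --variations '|--fp16|--bf16' '|--tf32' \
#       --target-metric-key train_samples_per_second \
#       --repeat-times 2
#
# Each '|'-separated dimension is expanded and combined via itertools.product, so the two
# variation groups above (3 options x 2 options) produce 6 benchmark runs.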
| 135 | 1 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def UpperCAmelCase__ ( __snake_case , __snake_case ) -> List[Any]:
_A = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1_024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1_024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
_A = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
_A = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=snake_case_ , output_all_encodings=snake_case_ , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , snake_case_ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
_A = """openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
_A = os.path.join(get_home_dir() , '''models''' )
_A = _load_vocab(snake_case_ , snake_case_ , snake_case_ , cls=snake_case_ )
_A = nlp.model.BERTModel(
snake_case_ , len(snake_case_ ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=snake_case_ , use_token_type_embed=snake_case_ , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=snake_case_ , use_decoder=snake_case_ , )
original_bort.load_parameters(snake_case_ , cast_dtype=snake_case_ , ignore_extra=snake_case_ )
_A = original_bort._collect_params_with_prefix()
# Build our config 🤗
_A = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(snake_case_ ),
}
_A = BertConfig.from_dict(snake_case_ )
_A = BertForMaskedLM(snake_case_ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
_A = hf_param.shape
_A = to_torch(params[gluon_param] )
_A = gluon_param.shape
assert (
shape_hf == shape_gluon
), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
_A = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
_A = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
_A = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
_A = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
_A = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
_A = hf_bort_model.bert.encoder.layer[i]
# self attention
_A = layer.attention.self
_A = check_and_map_params(
self_attn.key.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
_A = check_and_map_params(
self_attn.key.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
_A = check_and_map_params(
self_attn.query.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
_A = check_and_map_params(
self_attn.query.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
_A = check_and_map_params(
self_attn.value.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
_A = check_and_map_params(
self_attn.value.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
_A = layer.attention.output
_A = check_and_map_params(
self_output.dense.bias , F'''encoder.transformer_cells.{i}.proj.bias''' )
_A = check_and_map_params(
self_output.dense.weight , F'''encoder.transformer_cells.{i}.proj.weight''' )
_A = check_and_map_params(
self_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.layer_norm.beta''' )
_A = check_and_map_params(
self_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
_A = layer.intermediate
_A = check_and_map_params(
intermediate.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
_A = check_and_map_params(
intermediate.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
_A = layer.output
_A = check_and_map_params(
bert_output.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
_A = check_and_map_params(
bert_output.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
_A = check_and_map_params(
bert_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
_A = check_and_map_params(
bert_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
_A = RobertaTokenizer.from_pretrained('''roberta-base''' )
_A = tokenizer.encode_plus(snake_case_ )["""input_ids"""]
# Get gluon output
_A = mx.nd.array([input_ids] )
_A = original_bort(inputs=snake_case_ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(snake_case_ )
_A = BertModel.from_pretrained(snake_case_ )
hf_bort_model.eval()
_A = tokenizer.encode_plus(snake_case_ , return_tensors='''pt''' )
_A = hf_bort_model(**snake_case_ )[0]
_A = output_gluon[0].asnumpy()
_A = output_hf[0].detach().numpy()
_A = np.max(np.abs(hf_layer - gluon_layer ) ).item()
_A = np.allclose(snake_case_ , snake_case_ , atol=1e-3 )
if success:
        print('''✔️ Both models output the same tensors''' )
else:
        print('''❌ Both models do **NOT** output the same tensors''' )
print('''Absolute difference is:''' , snake_case_ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowerCamelCase = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
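# Hypothetical usage (the checkpoint path is illustrative only):
#
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch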
| 317 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def A_ ( snake_case_ : int ):
'''simple docstring'''
UpperCamelCase : List[Any] = checkpoints.load_tax_checkpoint(snake_case_ )
UpperCamelCase : Dict = flatten_dict(snake_case_ )
return flax_params
def A_ ( snake_case_ : Optional[int] ):
'''simple docstring'''
UpperCamelCase : Optional[int] = {}
UpperCamelCase : Tuple = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
UpperCamelCase : Optional[int] = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
UpperCamelCase : List[str] = """.""".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
UpperCamelCase : List[str] = new_key.replace(snake_case_ ,snake_case_ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
UpperCamelCase : str = new_key.replace(snake_case_ ,snake_case_ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
UpperCamelCase : List[str] = re.sub(R"""layers_(\d+)""" ,R"""layer.\1""" ,snake_case_ )
UpperCamelCase : Union[str, Any] = new_key.replace("""encoder""" ,"""encoder.encoder""" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
UpperCamelCase : Any = re.sub(R"""layers_(\d+)""" ,R"""layer.\1""" ,snake_case_ )
UpperCamelCase : Any = flax_dict[key]
UpperCamelCase : Optional[Any] = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
UpperCamelCase : Union[str, Any] = torch.from_numpy(converted_dict[key].T )
else:
UpperCamelCase : List[Any] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def A_ ( snake_case_ : Optional[Any] ,snake_case_ : Dict ,snake_case_ : str=False ,snake_case_ : Dict=False ):
'''simple docstring'''
UpperCamelCase : Optional[int] = get_flax_param(snake_case_ )
if not use_large:
UpperCamelCase : List[str] = PixaStructVisionConfig()
UpperCamelCase : int = PixaStructTextConfig()
else:
UpperCamelCase : List[str] = PixaStructVisionConfig(
hidden_size=1_5_3_6 ,d_ff=3_9_6_8 ,num_attention_heads=2_4 ,num_hidden_layers=1_8 )
UpperCamelCase : Tuple = PixaStructTextConfig(hidden_size=1_5_3_6 ,d_ff=3_9_6_8 ,num_heads=2_4 ,num_layers=1_8 )
UpperCamelCase : List[Any] = PixaStructConfig(
vision_config=encoder_config.to_dict() ,text_config=decoder_config.to_dict() ,is_vqa=snake_case_ )
UpperCamelCase : Optional[int] = PixaStructForConditionalGeneration(snake_case_ )
UpperCamelCase : Optional[Any] = rename_and_convert_flax_params(snake_case_ )
model.load_state_dict(snake_case_ )
UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" )
UpperCamelCase : Dict = PixaStructImageProcessor()
UpperCamelCase : List[str] = PixaStructProcessor(image_processor=snake_case_ ,tokenizer=snake_case_ )
if use_large:
UpperCamelCase : int = 4_0_9_6
UpperCamelCase : int = True
# mkdir if needed
os.makedirs(snake_case_ ,exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
processor.save_pretrained(snake_case_ )
print("""Model saved in {}""".format(snake_case_ ) )
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
    parser.add_argument('''--is_vqa''', action='''store_true''', help='''Whether the model is a VQA model.''')
__A : Optional[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
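# Hypothetical usage (the script name and paths are illustrative only):
#
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path ./pix2struct_t5x_checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-base
#
# Pass --use_large for the large variant and --is_vqa for VQA checkpoints.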
| 499 | 0 |
from math import factorial
def binomial_distribution( successes: int , trials: int , prob: float ) -> float:
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials' )
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers' )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError('the function is defined for non-negative integers' )
    if not 0 < prob < 1:
        raise ValueError('prob has to be in the range of 0 - 1' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
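    # Expected output: 0.2109375, since C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625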
| 335 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
class __a ( PretrainedConfig ):
    model_type = 'upernet'
    def __init__( self , backbone_config=None , hidden_size=512 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=384 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get('model_type' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 335 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : str = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class lowerCamelCase (PretrainedConfig ):
"""simple docstring"""
    model_type = 'biogpt'
    def __init__( self , vocab_size=42_384 , hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4_096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_024 , initializer_range=0.02 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 140 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class a ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
super().setUp()
__lowerCAmelCase = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
__lowerCAmelCase = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
__lowerCAmelCase = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
__lowerCAmelCase = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
    def UpperCAmelCase__ ( self : Any , **kwargs : Union[str, Any] ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def UpperCAmelCase__ ( self : List[str] , snake_case__ : str ):
"""simple docstring"""
__lowerCAmelCase = "adapt act apte"
__lowerCAmelCase = "adapt act apte"
return input_text, output_text
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        assert tok("sam" ).input_ids == [1_384]
        src_text = "I am a small frog."
        encoded = tok([src_text] , padding=False , truncation=True )["input_ids"]
        decoded = tok.batch_decode(encoded , skip_special_tokens=True , clean_up_tokenization_spaces=True )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text )["input_ids"]
        encoded_dot = tok(src_text_dot )["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 611 | 0 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    '''simple docstring'''
    def forward( self , input_ids , token_type_ids , attention_mask) ->Dict:
        '''simple docstring'''
        return None
class FuncNonContiguousArgs:
    '''simple docstring'''
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask) ->Union[str, Any]:
        '''simple docstring'''
        return None
class OnnxExportTestCase ( unittest.TestCase ):
'''simple docstring'''
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , '''tf''' , 12 , **model_kwargs)
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , '''pt''' , 12 , **model_kwargs)
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int:
'''simple docstring'''
from transformers import BertModel
        vocab = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
        with NamedTemporaryFile(mode='''w+t''') as vocab_file:
            vocab_file.write('''\n'''.join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)
            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir , '''pt''' , 12 , tokenizer)
@require_tf
@slow
def SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]:
'''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , '''tf''' , 12 , **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail('''Quantized model is bigger than initial ONNX model''')
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict:
'''simple docstring'''
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , '''pt''' , 12 , **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail('''Quantized model is bigger than initial ONNX model''')
    def _test_export( self , model , framework , opset , tokenizer=None , **model_kwargs) ->Optional[Any]:
        '''simple docstring'''
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath('''model.onnx''')
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework , model , path , opset , tokenizer , **model_kwargs)
                return path
        except Exception as e:
            self.fail(e)
@require_torch
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]:
'''simple docstring'''
from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random'''))
        tokenizer = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''')
        self._test_infer_dynamic_axis(model , tokenizer , '''pt''')
@require_tf
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
'''simple docstring'''
from transformers import TFBertModel
A__ = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random'''))
A__ = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''')
self._test_infer_dynamic_axis(UpperCAmelCase__ , UpperCAmelCase__ , '''tf''')
    def _test_infer_dynamic_axis( self , model , tokenizer , framework) ->Any:
        '''simple docstring'''
        nlp = FeatureExtractionPipeline(model , tokenizer)
        variable_names = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
        input_vars , output_vars , shapes , tokens = infer_shapes(nlp , framework)
        # Assert all variables are present
        self.assertEqual(len(shapes) , len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3] , input_vars)
        self.assertSequenceEqual(variable_names[3:] , output_vars)
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''})
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''})
        self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''})
def SCREAMING_SNAKE_CASE ( self : str) ->Any:
'''simple docstring'''
        input_names = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
        tokens = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
        ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , input_names)
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args) , 3)
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names) , set(input_names))
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']))
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , input_names)
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args) , 1)
        self.assertEqual(len(ordered_input_names) , 1)
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens['''input_ids'''])
        self.assertEqual(ordered_input_names[0] , '''input_ids''')
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
        generated = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''') , '''-test''')
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix())
| 177 |
def factorial( num ) -> int:
    """simple docstring"""
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact
def split_and_add( number ) -> int:
    """simple docstring"""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution( num = 100 ) -> int:
    """simple docstring"""
    nfact = factorial(num )
    result = split_and_add(nfact )
    return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
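    # For the default input of 100, solution() returns 648 - the digit sum of 100!
    # (Project Euler problem 20).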
| 177 | 1 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def strabool(v ):
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected" )
def convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ):
lowerCAmelCase_ = checkpoint[F"{old_prefix}.in_layers.0.weight"]
lowerCAmelCase_ = checkpoint[F"{old_prefix}.in_layers.0.bias"]
lowerCAmelCase_ = checkpoint[F"{old_prefix}.in_layers.2.weight"]
lowerCAmelCase_ = checkpoint[F"{old_prefix}.in_layers.2.bias"]
lowerCAmelCase_ = checkpoint[F"{old_prefix}.emb_layers.1.weight"]
lowerCAmelCase_ = checkpoint[F"{old_prefix}.emb_layers.1.bias"]
lowerCAmelCase_ = checkpoint[F"{old_prefix}.out_layers.0.weight"]
lowerCAmelCase_ = checkpoint[F"{old_prefix}.out_layers.0.bias"]
lowerCAmelCase_ = checkpoint[F"{old_prefix}.out_layers.3.weight"]
lowerCAmelCase_ = checkpoint[F"{old_prefix}.out_layers.3.bias"]
if has_skip:
lowerCAmelCase_ = checkpoint[F"{old_prefix}.skip_connection.weight"]
lowerCAmelCase_ = checkpoint[F"{old_prefix}.skip_connection.bias"]
return new_checkpoint
def A(__a: Optional[int] , __a: Tuple , __a: Union[str, Any] , __a: Tuple , __a: str=None ):
lowerCAmelCase_ = checkpoint[F"{old_prefix}.qkv.weight"].chunk(3 , dim=0 )
lowerCAmelCase_ = checkpoint[F"{old_prefix}.qkv.bias"].chunk(3 , dim=0 )
lowerCAmelCase_ = checkpoint[F"{old_prefix}.norm.weight"]
lowerCAmelCase_ = checkpoint[F"{old_prefix}.norm.bias"]
lowerCAmelCase_ = weight_q.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ = bias_q.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ = weight_k.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ = bias_k.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ = weight_v.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ = bias_v.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ = (
checkpoint[F"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 )
)
lowerCAmelCase_ = checkpoint[F"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def con_pt_to_diffuser(checkpoint_path , unet_config ):
    checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    new_checkpoint = {}
lowerCAmelCase_ = checkpoint['''time_embed.0.weight''']
lowerCAmelCase_ = checkpoint['''time_embed.0.bias''']
lowerCAmelCase_ = checkpoint['''time_embed.2.weight''']
lowerCAmelCase_ = checkpoint['''time_embed.2.bias''']
if unet_config["num_class_embeds"] is not None:
lowerCAmelCase_ = checkpoint['''label_emb.weight''']
lowerCAmelCase_ = checkpoint['''input_blocks.0.0.weight''']
lowerCAmelCase_ = checkpoint['''input_blocks.0.0.bias''']
lowerCAmelCase_ = unet_config['''down_block_types''']
lowerCAmelCase_ = unet_config['''layers_per_block''']
lowerCAmelCase_ = unet_config['''attention_head_dim''']
lowerCAmelCase_ = unet_config['''block_out_channels''']
lowerCAmelCase_ = 1
lowerCAmelCase_ = channels_list[0]
for i, layer_type in enumerate(A_ ):
lowerCAmelCase_ = channels_list[i]
lowerCAmelCase_ = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(A_ ):
lowerCAmelCase_ = F"down_blocks.{i}.resnets.{j}"
lowerCAmelCase_ = F"input_blocks.{current_layer}.0"
lowerCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
lowerCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(A_ ):
lowerCAmelCase_ = F"down_blocks.{i}.resnets.{j}"
lowerCAmelCase_ = F"input_blocks.{current_layer}.0"
lowerCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
lowerCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
lowerCAmelCase_ = F"down_blocks.{i}.attentions.{j}"
lowerCAmelCase_ = F"input_blocks.{current_layer}.1"
lowerCAmelCase_ = convert_attention(
A_ , A_ , A_ , A_ , A_ )
current_layer += 1
if i != len(A_ ) - 1:
lowerCAmelCase_ = F"down_blocks.{i}.downsamplers.0"
lowerCAmelCase_ = F"input_blocks.{current_layer}.0"
lowerCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
current_layer += 1
lowerCAmelCase_ = current_channels
# hardcoded the mid-block for now
lowerCAmelCase_ = '''mid_block.resnets.0'''
lowerCAmelCase_ = '''middle_block.0'''
lowerCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
lowerCAmelCase_ = '''mid_block.attentions.0'''
lowerCAmelCase_ = '''middle_block.1'''
lowerCAmelCase_ = convert_attention(A_ , A_ , A_ , A_ , A_ )
lowerCAmelCase_ = '''mid_block.resnets.1'''
lowerCAmelCase_ = '''middle_block.2'''
lowerCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
lowerCAmelCase_ = 0
lowerCAmelCase_ = unet_config['''up_block_types''']
for i, layer_type in enumerate(A_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
lowerCAmelCase_ = F"up_blocks.{i}.resnets.{j}"
lowerCAmelCase_ = F"output_blocks.{current_layer}.0"
lowerCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
current_layer += 1
if i != len(A_ ) - 1:
lowerCAmelCase_ = F"up_blocks.{i}.upsamplers.0"
lowerCAmelCase_ = F"output_blocks.{current_layer-1}.1"
lowerCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
lowerCAmelCase_ = F"up_blocks.{i}.resnets.{j}"
lowerCAmelCase_ = F"output_blocks.{current_layer}.0"
lowerCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
lowerCAmelCase_ = F"up_blocks.{i}.attentions.{j}"
lowerCAmelCase_ = F"output_blocks.{current_layer}.1"
lowerCAmelCase_ = convert_attention(
A_ , A_ , A_ , A_ , A_ )
current_layer += 1
if i != len(A_ ) - 1:
lowerCAmelCase_ = F"up_blocks.{i}.upsamplers.0"
lowerCAmelCase_ = F"output_blocks.{current_layer-1}.2"
lowerCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
lowerCAmelCase_ = checkpoint['''out.0.weight''']
lowerCAmelCase_ = checkpoint['''out.0.bias''']
lowerCAmelCase_ = checkpoint['''out.2.weight''']
lowerCAmelCase_ = checkpoint['''out.2.bias''']
return new_checkpoint
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
lowerCamelCase__ = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(F'''Checkpoint: {ckpt_name}''')
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
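    # Hypothetical usage (the checkpoint filename below is illustrative; the name must contain
    # "cd"/"ct" plus a dataset tag such as "imagenet64" for the config lookup above to succeed):
    #
    #   python convert_consistency_to_diffusers.py \
    #       --unet_path cd_imagenet64_l2.pt \
    #       --dump_path ./consistency-model-imagenet64 \
    #       --class_cond True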
| 122 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester :
    def __init__( self , parent , batch_size=1_3 , patch_size=2 , max_length=2_4 , num_mel_bins=1_6 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ):
"""simple docstring"""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, input_values, labels
    def get_config( self ):
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def create_and_check_model( self , config , input_values , labels ):
"""simple docstring"""
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_values , labels = config_and_inputs
        inputs_dict = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class __snake_case ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
snake_case__ : Any = False
snake_case__ : List[Any] = False
snake_case__ : Optional[Any] = False
snake_case__ : Optional[Any] = False
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
        self.model_tester = ASTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''input_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_audio( ):
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' )
    audio , sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class __snake_case ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(torch_device )
        audio , sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 5_2_7) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 83 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester :
    def __init__(self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
def UpperCAmelCase(self : Dict ) -> Union[str, Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def UpperCAmelCase(self , config , input_ids , head_mask , token_type_ids , *args ) -> Optional[int]:
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def UpperCAmelCase(self , config , input_ids , head_mask , token_type_ids , *args ) -> List[Any]:
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase(self : int , _A : str , _A : str , _A : Union[str, Any] , _A : Union[str, Any] , *_A : Tuple ) -> int:
snake_case = OpenAIGPTDoubleHeadsModel(_A )
model.to(_A )
model.eval()
snake_case = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase(self : List[Any] , _A : Tuple , _A : Union[str, Any] , _A : str , _A : Dict , *_A : Optional[int] ) -> int:
snake_case = self.num_labels
snake_case = OpenAIGPTForSequenceClassification(_A )
model.to(_A )
model.eval()
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase(self : Any ) -> Tuple:
snake_case = self.prepare_config_and_inputs()
(
(
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) ,
) = config_and_inputs
snake_case = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
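# Illustrative decode of the integration test above (not part of the original
# file; greedy decoding, tokenizer class assumed to pair with the "openai-gpt"
# checkpoint):
#
#     from transformers import OpenAIGPTTokenizer
#     tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#     model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
#     input_ids = tokenizer("the president is", return_tensors="pt").input_ids
#     output_ids = model.generate(input_ids, do_sample=False)
#     print(tokenizer.decode(output_ids[0]))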
| 294 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """
    Diffie-Hellman key exchange over the RFC 3526 MODP groups defined above.
    """

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        # 32 random bytes from the OS CSPRNG give a 256-bit private key
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
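    # Illustrative exchange (not part of the original file): both parties
    # derive the same shared secret from each other's public keys.
    alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
    shared_a = alice.generate_shared_key(bob.generate_public_key())
    shared_b = bob.generate_shared_key(alice.generate_public_key())
    assert shared_a == shared_b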
| 294 | 1 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search over array[left:right]; return the index of target or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array; return the index of target or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted array; return the index of target or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
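# Illustrative self-check (not in the original file): both variants should agree
# on a small sorted input; with precision = 10 each falls back to the linear scan.
_demo = [1, 3, 5, 7, 9, 11, 13]
assert ite_ternary_search(_demo, 9) == rec_ternary_search(0, len(_demo) - 1, _demo, 9) == 4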
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Union[str, Any] = input('Enter numbers separated by comma:\n').strip()
A_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
A_ : Tuple = int(input('Enter the number to be found in the list:\n').strip())
A_ : Optional[Any] = ite_ternary_search(collection, target)
A_ : Optional[Any] = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F"""Iterative search: {target} found at positions: {resulta}""")
print(F"""Recursive search: {target} found at positions: {resulta}""")
else:
print('Not found')
| 303 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        # targets: one random binary mask per label and random per-label presence flags
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels

        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels

        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
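# Note (illustrative): the scalar loss asserted above combines a class
# cross-entropy term with mask binary cross-entropy and dice terms, with
# predictions matched to targets via bipartite (Hungarian) matching.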
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False
    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_maskaformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs, output_hidden_states=True)
    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
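# Illustrative end-to-end sketch (not part of the original tests; the
# post-processing call is an assumption based on the image processor API):
#
#     processor = MaskaFormerImageProcessor.from_pretrained(
#         "facebook/mask2former-swin-small-coco-instance")
#     model = MaskaFormerForUniversalSegmentation.from_pretrained(
#         "facebook/mask2former-swin-small-coco-instance")
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     outputs = model(**inputs)
#     result = processor.post_process_instance_segmentation(outputs)[0]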
| 303 | 1 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
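# Note (illustrative, not from the original file): with the default
# do_backward=True the call is delegated to accelerator.backward; the explicit
# do_backward=False path divides the loss by gradient_accumulation_steps and
# backpropagates manually, mimicking accumulation for the "ground truth" model.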
def get_training_setup(accelerator, sched=False):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
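# Illustrative sketch (not part of the original tests): the canonical
# accumulation loop that the test functions below exercise looks like this
# (the names here are assumptions for the example):
#
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch in dataloader:
#         with accelerator.accumulate(model):
#             loss = compute_loss(model, batch)  # hypothetical helper
#             accelerator.backward(loss)
#             optimizer.step()
#             optimizer.zero_grad()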
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()

        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 719 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase =logging.get_logger(__name__)
def normalize_box(box, width, height):
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
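# Worked example (illustrative): a box [10, 20, 60, 120] in a 200x400 image
# maps to [50, 50, 300, 300] on the 0-1000 scale used by LayoutLM-style models.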
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
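# Illustrative usage sketch (assumes pytesseract and the Tesseract binary are
# installed; the image array stands in for a hypothetical document scan):
#
#     words, boxes = apply_tesseract(image, lang="eng", tesseract_config="")
#     # words[i] is the i-th recognized token, boxes[i] its 0-1000 bounding box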
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    r"""
    Constructs a LayoutLMv3 image processor, which optionally resizes, rescales and
    normalizes document images and can run Tesseract OCR to extract words and
    normalized bounding boxes.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 462 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
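# Minimal usage sketch (illustrative, not part of the original file): with the
# _LazyModule registration above, heavy submodules are imported only on first
# attribute access, e.g.
#
#     from transformers import EfficientNetConfig, EfficientNetModel
#     model = EfficientNetModel(EfficientNetConfig())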
| 645 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """simple docstring"""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        """simple docstring"""
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """simple docstring"""
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """simple docstring"""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)
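# Usage sketch (the checkpoint name is an assumption, not pinned by this file):
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   batch = processor(text="Hello world", return_tensors="pt")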
| 645 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowercase = logging.get_logger(__name__)
# TODO: upload to AWS
lowercase = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
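# Maps each public checkpoint id to its hosted configuration file for `from_pretrained`-style lookups.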
class RetriBertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 564 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def __A ( _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
return EnvironmentCommand()
class EnvironmentCommand(BaseDiffusersCLICommand):
    '''simple docstring'''

    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=__A)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f'{pt_version} ({pt_cuda_available})',
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f'- {prop}: {val}' for prop, val in d.items()]) + "\n"
| 564 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
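# Project Euler 101: fit polynomials of increasing degree to the first terms of a
# generating function (via Gaussian elimination) and sum each fit's first incorrect term.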
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))

    return interpolated_func


def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]

    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret
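# Sanity check from the problem statement (hedged recollection): for the cubic
# u(n) = n**3 the first incorrect terms are 1, 15, 58, ..., summing to 74.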
if __name__ == "__main__":
print(F"{solution() = }")
| 127 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
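# TensorFlow classes are only added when TF itself imports cleanly; the PyTorch block
# above follows the same pattern behind is_torch_available().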
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 663 | 0 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        '''simple docstring'''
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
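    # With the toy merges above, BPE greedily applies "l o" -> "lo", "lo w" -> "low" and
    # "e r</w>" -> "er</w>", so "lower" splits into ["low", "er</w>"].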
    def test_padding(self, max_length=15):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )

    def test_padding_different_model_input_name(self):
        '''simple docstring'''
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    pass
| 702 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
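    # Each create_and_check_* helper below instantiates one task head on this shared
    # config, runs a forward pass, and asserts only the output shapes.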
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFDistilBertModel,
            'fill-mask': TFDistilBertForMaskedLM,
            'question-answering': TFDistilBertForQuestionAnswering,
            'text-classification': TFDistilBertForSequenceClassification,
            'token-classification': TFDistilBertForTokenClassification,
            'zero-shot': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
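        # The 3x3 logits slice is checked with atol=1e-4, loose enough to absorb small
        # numeric differences across TF builds and hardware.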
| 278 | 0 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"
    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]
    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
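    # Example invocation (hypothetical paths):
    #   python convert_script.py --repo_path ./ddpm-cifar10-32 --dump_path ./converted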
| 32 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            '''simple docstring'''
            pass
def hashimage(image: Image) -> str:
    '''simple docstring'''
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    '''simple docstring'''
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __magic_name__ ( unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))

    def get_test_pipeline(self, model, tokenizer, processor):
        '''simple docstring'''
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        '''simple docstring'''
        pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF')
    def test_small_model_tf(self):
'''simple docstring'''
pass
    @slow
    @require_torch
    def test_small_model_pt(self):
        '''simple docstring'''
        image_segmenter = pipeline('mask-generation', model='facebook/sam-vit-huge')

        outputs = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg', points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks']):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4) , [
{'mask': {'hash': '115ad19f5f', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0_21},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.00_53},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.99_67},
{'mask': {'hash': '453c7844bd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_93},
{'mask': {'hash': '3d44f2926d', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.99_09},
{'mask': {'hash': '64033ddc3f', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.98_79},
{'mask': {'hash': '801064ff79', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.98_34},
{'mask': {'hash': '6172f276ef', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.97_16},
{'mask': {'hash': 'b49e60e084', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.96_12},
{'mask': {'hash': 'a811e775fd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.95_99},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.95_52},
{'mask': {'hash': '9d8257e080', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.95_32},
{'mask': {'hash': '32de6454a8', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.95_16},
{'mask': {'hash': 'af3d4af2c8', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.94_99},
{'mask': {'hash': '3c6db475fb', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.94_83},
{'mask': {'hash': 'c290813fb9', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.94_64},
{'mask': {'hash': 'b6f0b8f606', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_43},
{'mask': {'hash': '92ce16bfdf', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_43},
{'mask': {'hash': 'c749b25868', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.94_08},
{'mask': {'hash': 'efb6cab859', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.93_35},
{'mask': {'hash': '1ff2eafb30', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.93_26},
{'mask': {'hash': '788b798e24', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.92_62},
{'mask': {'hash': 'abea804f0e', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.89_99},
{'mask': {'hash': '7b9e8ddb73', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.89_86},
{'mask': {'hash': 'cd24047c8a', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.89_84},
{'mask': {'hash': '6943e6bcbd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.88_73},
{'mask': {'hash': 'b5f47c9191', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.88_71}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        '''simple docstring'''
        model_id = 'facebook/sam-vit-huge'
        image_segmenter = pipeline('mask-generation', model=model_id)

        outputs = image_segmenter(
            'http://images.cocodataset.org/val2017/000000039769.jpg', pred_iou_thresh=1, points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks']):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4) , [
{'mask': {'hash': '115ad19f5f', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.02_10},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.00_53},
] , )
| 446 | 0 |
"""simple docstring"""
import operator as op
lowerCamelCase__ = """scaler.pt"""
lowerCamelCase__ = """pytorch_model"""
lowerCamelCase__ = """random_states"""
lowerCamelCase__ = """optimizer"""
lowerCamelCase__ = """scheduler"""
lowerCamelCase__ = """pytorch_model.bin"""
lowerCamelCase__ = """pytorch_model.bin.index.json"""
lowerCamelCase__ = """model.safetensors"""
lowerCamelCase__ = """model.safetensors.index.json"""
lowerCamelCase__ = """1.10.2"""
lowerCamelCase__ = """py38"""
lowerCamelCase__ = """4.17.0"""
lowerCamelCase__ = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
lowerCamelCase__ = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
lowerCamelCase__ = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
lowerCamelCase__ = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
lowerCamelCase__ = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
lowerCamelCase__ = """2.0.1"""
lowerCamelCase__ = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
lowerCamelCase__ = ["""default""", """reduce-overhead""", """max-autotune"""]
lowerCamelCase__ = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowerCamelCase__ = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
lowerCamelCase__ = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
lowerCamelCase__ = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 549 |
"""simple docstring"""
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 549 | 1 |
"""simple docstring"""
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    '''simple docstring'''

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save('''image_data/lena_brightness.png''', format='''png''')
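# point() maps each pixel value c to 128 + level + (c - 128), i.e. a uniform shift by
# `level`; PIL casts the results back into the image's valid 0-255 range.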
| 58 |
'''simple docstring'''
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    mode = "token-classification"

    def __init__(self, hparams):
        """simple docstring"""
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module('''tasks''')
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                F"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
                F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""")
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        """simple docstring"""
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        """simple docstring"""
        inputs = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type != "distilbert":
            inputs['''token_type_ids'''] = (
                batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
            )  # XLM and RoBERTa don"t use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        """simple docstring"""
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info('''Loading features from cached file %s''', cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info('''Creating features from dataset file at %s''', args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples, self.labels, args.max_seq_length, self.tokenizer, cls_token_at_end=bool(self.config.model_type in ['''xlnet''']), cls_token=self.tokenizer.cls_token, cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0, sep_token=self.tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(self.config.model_type in ['''xlnet''']), pad_token=self.tokenizer.pad_token_id, pad_token_segment_id=self.tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                logger.info('''Saving features into cached file %s''', cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        """simple docstring"""
        cached_features_file = self._feature_file(mode)
        logger.info('''Loading features from cached file %s''', cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size)
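    # token_type_ids default to zeros when the tokenizer does not produce them, so the
    # TensorDataset always carries four aligned tensors per example.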
    def validation_step(self, batch, batch_nb):
        """Compute validation"""
        inputs = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type != "distilbert":
            inputs['''token_type_ids'''] = (
                batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['''labels'''].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        """simple docstring"""
        val_loss_mean = torch.stack([x['''val_loss'''] for x in outputs]).mean()
        preds = np.concatenate([x['''pred'''] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x['''target'''] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            '''val_loss''': val_loss_mean,
            '''accuracy_score''': accuracy_score(out_label_list, preds_list),
            '''precision''': precision_score(out_label_list, preds_list),
            '''recall''': recall_score(out_label_list, preds_list),
            '''f1''': f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret['''log'''] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        """simple docstring"""
        ret, preds, targets = self._eval_end(outputs)
        logs = ret['''log''']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        """simple docstring"""
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret['''log''']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """simple docstring"""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            '''--task_type''', default='''NER''', type=str, help='''Task type to fine tune in training (e.g. NER, POS, etc)''')
        parser.add_argument(
            '''--max_seq_length''', default=128, type=int, help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ), )
        parser.add_argument(
            '''--labels''', default='''''', type=str, help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''', )
        parser.add_argument(
            '''--gpus''', default=0, type=int, help='''The number of GPUs allocated for this, it is by default 0 meaning none''', )
        parser.add_argument(
            '''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
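    # Example run (hypothetical paths/flags, assuming the generic lightning_base CLI):
    #   python run_ner.py --data_dir ./data --labels ./labels.txt \
    #       --model_name_or_path bert-base-cased --output_dir ./out --do_train --do_predict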
| 501 | 0 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
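# These substring rewrites translate T5X parameter paths into the module names used by
# the Hugging Face SwitchTransformers implementation (e.g. the q/k/v/o attention projections).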
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r""".*/layers_(\d+)"""
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", new_key)

        layer_to_block_of_layer = r"""(encoder|decoder)\/"""
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"""/mlp/""", r"""/1/mlp/""", new_key)
                new_key = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"""/mlp/""", r"""/2/mlp/""", new_key)
                new_key = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f'{key} -> {new_key}')
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            """encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            """decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
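# Gin hyperparameter names on the left are mapped to SwitchTransformersConfig keyword
# arguments when parsing a T5X gin file below.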
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, """r""") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"""(.*) = ([0-9.]*)""", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if """.""" in value else int(value)

    activation = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f'Loading flax weights from : {flax_checkpoint_path}')
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["""target"""]
    flax_params = flatten_dict(flax_params, sep="""/""")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="""/""")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f'Save PyTorch model to {pytorch_dump_path}')
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 162 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
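    # The create_and_check_* helpers below mirror the DistilBERT tester pattern earlier
    # in this document: build one task head, run a forward pass, and assert output shapes.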
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
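        # Shape note: tf.expand_dims turns (batch_size, seq_length) into
        # (batch_size, 1, seq_length), and tf.tile then repeats the middle axis
        # to (batch_size, num_choices, seq_length), one copy per answer choice.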
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
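    # Note: save_pretrained(..., saved_model=True) exports a TF SavedModel under
    # "<tmpdir>/saved_model/1", which is exactly what tf.keras.models.load_model
    # reads back in the test above.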
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
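
# Note: the integration check above is gated behind @slow; in the transformers
# test suite such tests only run when the RUN_SLOW=1 environment variable is
# set, e.g. (path is illustrative):
#   RUN_SLOW=1 pytest tests/models/convbert/test_modeling_tf_convbert.py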
| 162 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    """An undirected weighted graph with Boruvka's minimum spanning tree algorithm."""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Add an edge between u_node and v_node with the given weight."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Return the root of the component that u_node belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Refresh every node's component root after a merge."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Union-by-size merge of the components containing u_node and v_node."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Compute and print a minimum spanning tree using Boruvka's algorithm."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def a_() -> None:
    pass
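
# Minimal usage sketch for the class above: a 3-node weighted graph whose
# minimum spanning tree keeps edges (0, 2, weight 3) and (0, 1, weight 5),
# for a total weight of 8.
def _demo_boruvka() -> None:
    graph = Graph(3)
    graph.add_edge(0, 1, 5)
    graph.add_edge(1, 2, 10)
    graph.add_edge(0, 2, 3)
    graph.boruvka()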
if __name__ == "__main__":
import doctest
doctest.testmod()
| 571 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
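
# As above, this @slow integration test is skipped unless RUN_SLOW=1 is set.
# The "squeezebert/squeezebert-mnli" checkpoint is a 3-way MNLI classifier,
# hence the expected (1, 3) logits shape.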
| 571 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
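
# A rough sketch of what the lazy-module pattern above buys us. This is a
# simplified stand-in, NOT the real transformers _LazyModule: attribute access
# on the module triggers the underlying import, so importing the package stays
# cheap until e.g. UniSpeechModel (and torch with it) is actually touched.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Find which submodule exports the requested symbol, import it lazily,
        # and forward the attribute lookup to it.
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")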
| 720 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
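
# Illustrative example (worked through the replacements above): a raw key like
#   "img_encoder.layers.0.blocks.1.attn.qkv.weight"
# becomes
#   "vision_model.encoder.stages.0.layers.1.self_attn.qkv.weight"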
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
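            # Shape note (illustrative): a fused qkv weight has shape
            # (3 * dim, dim), so query is val[:dim], key is val[dim : 2 * dim]
            # and value is val[-dim:] along the first axis; biases split the
            # same way with shape (3 * dim,).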
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ : List[Any] = key.split("." )
lowerCAmelCase__ : Dict = int(key_split[3] )
lowerCAmelCase__ : str = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase__ : List[Any] = val[:dim, :]
lowerCAmelCase__ : Any = val[
dim : dim * 2, :
]
lowerCAmelCase__ : Dict = val[-dim:, :]
else:
lowerCAmelCase__ : str = val[:dim]
lowerCAmelCase__ : str = val[dim : dim * 2]
lowerCAmelCase__ : Optional[int] = val[-dim:]
else:
lowerCAmelCase__ : List[str] = rename_key(lowerCamelCase_ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCAmelCase__ : Optional[Any] = val.squeeze_()
else:
lowerCAmelCase__ : List[str] = val
return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
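
# The URL above points at COCO val2017 image 000000039769.jpg, the image that
# transformers conversion scripts conventionally use as a quick sanity check.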
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """
    Copy/paste/tweak the model's weights into the transformers design.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
snake_case = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 568 | 0 |